Example #1
    def train(self, batch_size=1024, epochs=2, initial_epoch=0, verbose=1):
        """Train SDNE model.

        Parameters
        ----------
        batch_size : int, optional (default : 1024)

        epochs : int, optional (default : 2)

        initial_epoch : int, optional (default : 0)

        verbose : int, optional (default : 1)

        """
        from tensorflow.python.keras.callbacks import History

        if batch_size >= self.node_size:
            if batch_size > self.node_size:
                print('batch_size({0}) > node_size({1}), set batch_size = {1}'.format(
                    batch_size, self.node_size))
                batch_size = self.node_size
            return self.model.fit([self.A.todense(), self.L.todense()], [self.A.todense(), self.L.todense()],
                                  batch_size=batch_size, epochs=epochs, initial_epoch=initial_epoch, verbose=verbose,
                                  shuffle=False, )
        else:
            steps_per_epoch = (self.node_size - 1) // batch_size + 1
            hist = History()
            hist.on_train_begin()
            logs = {}
            for epoch in range(initial_epoch, epochs):
                start_time = time.time()
                losses = np.zeros(3)
                for i in range(steps_per_epoch):
                    index = np.arange(
                        i * batch_size, min((i + 1) * batch_size, self.node_size))
                    A_train = self.A[index, :].todense()
                    L_mat_train = self.L[index][:, index].todense()
                    inp = [A_train, L_mat_train]
                    batch_losses = self.model.train_on_batch(inp, inp)
                    losses += batch_losses
                losses = losses / steps_per_epoch

                logs['loss'] = losses[0]
                logs['2nd_loss'] = losses[1]
                logs['1st_loss'] = losses[2]
                epoch_time = int(time.time() - start_time)
                # TODO: fix the bug that the following call triggers in TF2:
                # hist.on_epoch_end(epoch, logs)
                if verbose > 0:
                    print('Epoch {0}/{1}'.format(epoch + 1, epochs))
                    print('{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'.format(
                        epoch_time, losses[0], losses[1], losses[2]))
            return hist
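One detail worth noting in the else branch: when batches are fed through train_on_batch instead of model.fit, the History callback has to be driven by hand, which is what hist.on_train_begin() and the (here disabled) hist.on_epoch_end() do. A minimal standalone sketch of that bookkeeping, with run_one_epoch standing in as a hypothetical per-epoch training helper:

    # Minimal sketch of manual History bookkeeping around a custom loop.
    # `model` is any compiled Keras model; run_one_epoch is a hypothetical
    # helper returning the epoch's mean loss.
    from tensorflow.python.keras.callbacks import History

    hist = History()
    hist.set_model(model)
    hist.on_train_begin()
    for epoch in range(epochs):
        logs = {'loss': run_one_epoch(model)}
        hist.on_epoch_end(epoch, logs)  # appends to hist.history['loss']
    print(hist.history['loss'])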
Example #2
    def load_from_dir(cls, directory: str):
        """
        Load an instance of this class from a directory, such that it was dumped to
        using :func:`gordo_components.model.models.KerasBaseEstimator.save_to_dir`

        Parameters
        ----------
        directory: str
            The directory to save this model to, must have write access

        Returns
        -------
        None
        """
        with open(path.join(directory, "params.json"), "r") as f:
            params = json.load(f)
        obj = cls(**params)
        model_path = path.join(directory, "model.h5")
        if path.exists(model_path):
            obj.model = load_model(model_path, compile=False)
            history_file = path.join(directory, "history.pkl")
            if path.isfile(history_file):
                from tensorflow.python.keras.callbacks import History

                obj.model.history = History()
                with open(history_file, "rb") as hist_f:
                    history, params, epoch = pickle.load(hist_f)
                obj.model.history.history = history
                obj.model.history.params = params
                obj.model.history.epoch = epoch

        return obj
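For orientation, the save-side counterpart implied by this loader would write the three artifacts it reads back: params.json, model.h5, and history.pkl containing a (history, params, epoch) tuple. A hedged sketch, not the actual gordo_components source, reusing the module's json/pickle/path imports:

    def save_to_dir(self, directory: str):
        # Assumed counterpart to load_from_dir, inferred from the files it reads.
        with open(path.join(directory, "params.json"), "w") as f:
            json.dump(self.get_params(), f)  # get_params() assumed sklearn-style
        if getattr(self, "model", None) is not None:
            self.model.save(path.join(directory, "model.h5"))
            if getattr(self.model, "history", None) is not None:
                h = self.model.history
                with open(path.join(directory, "history.pkl"), "wb") as hist_f:
                    pickle.dump((h.history, h.params, h.epoch), hist_f)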
Example #3
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    dataset_DIR = os.path.join(DATA_DIR, options.dataset)

    data_dict = dd.io.load(os.path.join(dataset_DIR, 'data.h5'))

    x_train, y_train = data_dict['x_train'], data_dict['y_train']
    x_test, y_test = data_dict['x_test'], data_dict['y_test']

    validation_data = (x_test, y_test)

    kwargs = {
        'MAX_SEQUENCE_LENGTH': x_train.shape[1],
        'num_classes': y_train.shape[1],
        'num_words': data_dict['num_words'],
        'dropout_rate': options.dropout_rate,
        'flag': options.mode
    }

    if 'rand' in options.mode:
        kwargs['embedding_weights'] = None

    else:
        fname_wordvec = 'glove_'

        if options.wordvectors:
            fname_wordvec = 'word2vec_'

        kwargs['embedding_weights'] = np.load(
            os.path.join(dataset_DIR, fname_wordvec + 'embedding.npy'))

    text_model = TextCNN(**kwargs)

    model = model_placement(text_model, num_gpus=options.num_gpus)
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=SGD(lr=options.lr))

    num_examples = range(x_train.shape[0])
    history = History()
    callbacks = [history]
    num_epochs = options.num_epochs

    if options.debug:
        validation_data = None
        num_examples = range(1000)
        callbacks = None
        num_epochs = 5

    train_data = (x_train[num_examples], y_train[num_examples])

    model.fit(x=x_train[num_examples],
              y=y_train[num_examples],
              batch_size=options.batch_size,
              callbacks=callbacks,
              epochs=num_epochs,
              validation_data=validation_data)
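A small point about this script: model.fit returns a History instance even when callbacks is None (Keras always attaches one internally), so the metrics survive the debug branch too. For example:

    # fit() always returns a History, whether or not one is passed in callbacks:
    result = model.fit(x=x_train[num_examples], y=y_train[num_examples],
                       batch_size=options.batch_size, epochs=num_epochs)
    print(result.history['loss'])  # one entry per epoch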
Example #4
def example_Cov_auto():
    simutation_parameters = {
        "PWM_file_1":
        "/home/qan/Desktop/DeepEpitif/DeepMetif/JASPAR2018_CORE_vertebrates_non-redundant_pfms_jaspar/MA0835.1.jaspar",
        "PWM_file_2":
        "/home/qan/Desktop/DeepEpitif/DeepMetif/JASPAR2018_CORE_vertebrates_non-redundant_pfms_jaspar/MA0515.1.jaspar",
        "seq_length": 1024,
        "center_pos": 50,
        "interspace": 10
    }

    [train_X, train_Y, test_X,
     test_Y] = get_simulated_dataset(parameters=simutation_parameters,
                                     train_size=20000,
                                     test_size=5000)

    #################################
    # build autoencoder model

    model = CAC()

    model.compile(optimizer='adam', loss='mse')

    history_autoencoder = model.fit(x=train_X,
                                    y=train_X,
                                    batch_size=10,
                                    epochs=20,
                                    verbose=1,
                                    callbacks=[History()],
                                    validation_data=(test_X, test_X))
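As in the previous example, the explicit History() in callbacks is redundant: fit() attaches its own History and returns it, so history_autoencoder already carries the per-epoch metrics:

    print(history_autoencoder.history['loss'])      # training loss per epoch
    print(history_autoencoder.history['val_loss'])  # validation loss per epoch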
Example #5
    def fit_generator(self, generator, epochs=1,
                      validation_data=None,
                      callbacks=None,
                      verbose=True):
        method = self._model.optimizer.method
        x0 = self._collect_weights()
        history = History()
        _callbacks = [BaseLogger(stateful_metrics=self._model.metrics_names)]
        _callbacks += (callbacks or []) + [history]
        callback_list = CallbackList(_callbacks)
        callback_list.set_model(self._model)
        callback_list.set_params({
            'epochs': epochs,
            'verbose': False,
            'metrics': list(self._model.metrics_names),
        })
        state = {
            'epoch': 0,
            'verbose': verbose,
            'callbacks': callback_list,
            'in_epoch': False,
            'epoch_logs': {},
        }
        min_options = {
            'maxiter': epochs,
            'maxfun': epochs*10,
            'ftol': 1e-10,
            'gtol': 1e-10,
            'eps': 1e-8,
        }

        val_generator = None
        if validation_data is not None:
            if isinstance(validation_data, keras.utils.Sequence):
                val_generator = validation_data
            elif isinstance(validation_data, tuple) and len(validation_data) == 2:
                val_generator = GeneratorWrapper(*validation_data)

        def on_iteration_end(xk):
            cb = state['callbacks']
            if val_generator is not None:
                self._validate(xk, val_generator, state)
            cb.on_epoch_end(state['epoch'], state['epoch_logs'])
            # if state['verbose']:
            #     epoch_logs = state['epoch_logs']
            #     print('epoch: ', state['epoch'],
            #           ', '.join([' {0}: {1:.3e}'.format(k, v) for k, v in epoch_logs.items()]))
            state['epoch'] += 1
            state['in_epoch'] = False
            state['epoch_logs'] = {}

        callback_list.on_train_begin()
        result = minimize(
            self._fun_generator, x0, method=method, jac=True, options=min_options,
            callback=on_iteration_end, args=(generator, state))
        self._update_weights(result['x'])
        callback_list.on_train_end()
        return history
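The _collect_weights and _update_weights helpers are not shown above; the standard trick for coupling Keras weights to scipy.optimize.minimize is to flatten every weight tensor into one vector and scatter the optimizer's result back. A minimal sketch under that assumption (the names mirror the snippet, the implementation is illustrative):

    import numpy as np

    def _collect_weights(self):
        # concatenate every weight tensor into a single flat vector
        return np.concatenate([w.ravel() for w in self._model.get_weights()])

    def _update_weights(self, x):
        # scatter the flat vector back into tensors of the original shapes
        weights, offset = [], 0
        for w in self._model.get_weights():
            weights.append(x[offset:offset + w.size].reshape(w.shape))
            offset += w.size
        self._model.set_weights(weights)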
Example #6
    def __getstate__(self):

        state = self.__dict__.copy()

        if hasattr(self, "model") and self.model is not None:
            buf = io.BytesIO()
            with h5py.File(buf, compression="lzf", mode="w") as h5:
                save_model(self.model, h5, overwrite=True, save_format="h5")
                buf.seek(0)
                state["model"] = buf
            if hasattr(self.model, "history"):
                from tensorflow.python.keras.callbacks import History

                history = History()
                history.history = self.model.history.history
                history.params = self.model.history.params
                history.epoch = self.model.history.epoch
                state["history"] = history
        return state
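__getstate__ covers only half of pickling; a matching __setstate__ would rebuild the model from the in-memory HDF5 buffer and reattach the saved History. An assumed counterpart, not the original source, reusing the module's h5py/load_model imports:

    def __setstate__(self, state):
        # Assumed inverse of __getstate__ above.
        buf = state.pop("model", None)
        history = state.pop("history", None)
        self.__dict__.update(state)
        if buf is not None:
            buf.seek(0)
            with h5py.File(buf, mode="r") as h5:
                self.model = load_model(h5, compile=False)
            if history is not None:
                self.model.history = history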
Example #7
    def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1):
        if batch_size >= self.node_size:
            if batch_size > self.node_size:
                print('batch_size({0}) > node_size({1}), set batch_size = {1}'.format(
                    batch_size, self.node_size))
                batch_size = self.node_size
            return self.model.fit([self.A, self.L], [self.A, self.L], batch_size=batch_size, epochs=epochs, initial_epoch=initial_epoch, verbose=verbose, shuffle=False)
        else:
            steps_per_epoch = (self.node_size - 1) // batch_size + 1
            hist = History()
            hist.on_train_begin()
            logs = {}
            for epoch in range(initial_epoch, epochs):
                start_time = time.time()
                losses = np.zeros(3)
                for i in range(steps_per_epoch):
                    index = np.arange(i * batch_size, min((i + 1) * batch_size, self.node_size))

                    train_A = self.A[index, :]  # batch_size * node_size
                    train_L = self.L[index][:, index]  # the L matrix is a diagonal matrix

                    inp = [train_A, train_L]
                    batch_losses = self.model.train_on_batch(inp, inp)
                    losses += batch_losses
                losses = losses / steps_per_epoch
                logs['loss'] = losses[0]
                logs['2nd_loss'] = losses[1]
                logs['1st_loss'] = losses[2]
                epoch_time = int(time.time() - start_time)
                hist.on_epoch_end(epoch, logs)
                if verbose > 0:
                    print('Epoch {0}/{1}'.format(epoch + 1, epochs))
                    print('{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'.format(
                        epoch_time, losses[0], losses[1], losses[2]))
            return hist
Example #8
def example_generator():

    separate_dataset("regions_for_learning_with_head.clean.equal_size.bed",
                     ["chr1"], "valid.bed")
    separate_dataset("regions_for_learning_with_head.clean.equal_size.bed",
                     ["chr2", "chr19"], "test.bed")
    separate_dataset("regions_for_learning_with_head.clean.equal_size.bed", [
        "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10",
        "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18",
        "chr20", "chr21", "chr22"
    ], "train.bed")

    train_gen = DataGenerator(
        data_path="train.bed",
        ref_fasta=
        "../GSM1865005_allC.MethylC-seq_WT_rods_rep1.tsv/GRCm38.primary_assembly.genome.fa.gz",
        genome_size_file="./mm10.genome.size",
        epi_track_files=["MethylC-seq_WT_cones_rep1_CpG.clean.plus.sorted.bw"],
        tasks=["TARGET"],
        upsample=True,
        upsample_ratio=0.3)

    valid_gen = DataGenerator(
        data_path="valid.bed",
        ref_fasta=
        "../GSM1865005_allC.MethylC-seq_WT_rods_rep1.tsv/GRCm38.primary_assembly.genome.fa.gz",
        genome_size_file="./mm10.genome.size",
        epi_track_files=["MethylC-seq_WT_cones_rep1_CpG.clean.plus.sorted.bw"],
        tasks=["TARGET"],
        upsample=True,
        upsample_ratio=0.3)

    model = initialize_model()

    trainning_history = model.fit_generator(
        train_gen,
        validation_data=valid_gen,
        steps_per_epoch=5000,
        validation_steps=500,
        epochs=10,
        verbose=1,
        use_multiprocessing=False,
        workers=4,
        max_queue_size=50,
        callbacks=[
            History(),
            ModelCheckpoint("ATAC_peak_Classification_positive_constrain.h5",
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            mode='min')
        ])
Example #9
    def __init__(self, GPU, ds_class_name):
        self.test_score = None

        self.X_train = None
        self.y_train = None
        self.X_test = None
        self.y_test = None

        self.dataset = ds_class_name
        self.num_channels = ds_class_name.get_num_channels()
        self.img_height = ds_class_name.get_img_height()
        self.img_width = ds_class_name.get_img_width()

        self.default_num_labels = None

        self.opt_GPU = GPU

        self.history = History()
Example #10
    def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1):
        print(self.model.summary())
        # return
        if batch_size >= self.node_size:
            if batch_size > self.node_size:
                print('batch_size({0}) > node_size({1}), set batch_size = {1}'.
                      format(batch_size, self.node_size))
                batch_size = self.node_size
            return self.model.fit(
                [self.A.todense(), self.L.todense()],
                [self.A.todense(), self.L.todense()],
                batch_size=batch_size,
                epochs=epochs,
                initial_epoch=initial_epoch,
                verbose=verbose,
                shuffle=False,
            )

        else:
            steps_per_epoch = (self.node_size - 1) // batch_size + 1
            hist = History()
            hist.on_train_begin()
            logs = {}
            for epoch in range(initial_epoch, epochs):
                start_time = time.time()
                losses = np.zeros(3)
                for i in range(steps_per_epoch):
                    index = np.arange(
                        i * batch_size,
                        min((i + 1) * batch_size, self.node_size))
                    A_train = self.A[index, :].todense()

                    L_mat_train = self.L[index][:, index].todense()
                    inp = [A_train, L_mat_train]
                    print("A_train: ", A_train)
                    print("L_mat_train: ", L_mat_train)
                    batch_losses = self.model.train_on_batch(inp, inp)
                    losses += batch_losses
                losses = losses / steps_per_epoch

                logs['loss'] = losses[0]
                logs['2nd_loss'] = losses[1]
                logs['1st_loss'] = losses[2]

                epoch_time = int(time.time() - start_time)
                hist.on_epoch_end(epoch, logs)
                if verbose > 0:
                    print('Epoch {0}/{1}'.format(epoch + 1, epochs))
                    print(
                        '{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'
                        .format(epoch_time, losses[0], losses[1], losses[2]))
        return hist
Example #11
def generate_tf_history(model, hyperparameters, accuracy, loss, val_accuracy, val_loss, val_precision, val_recall):
    H = History()
    H.set_model(model)
    H.set_params({
        'batch_size': hyperparameters.batch_size,
        'epochs': hyperparameters.epochs,
        'metrics': ['loss', 'accuracy', 'val_loss', 'val_accuracy', 'val_precision', 'val_recall']
    })
    H.history = {
        'loss': loss,
        'accuracy': accuracy,
        'val_loss': val_loss,
        'val_accuracy': val_accuracy,
        'val_precision': val_precision,
        'val_recall': val_recall
    }
    return H
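Usage is straightforward: the helper packs precomputed per-epoch metric lists into a History so code written against model.fit output keeps working. An illustrative call, with SimpleNamespace standing in for the real hyperparameters object and made-up metric values:

    from types import SimpleNamespace

    hp = SimpleNamespace(batch_size=32, epochs=3)
    H = generate_tf_history(model, hp,
                            accuracy=[0.7, 0.8, 0.9], loss=[0.9, 0.5, 0.3],
                            val_accuracy=[0.6, 0.7, 0.8], val_loss=[1.0, 0.7, 0.5],
                            val_precision=[0.5, 0.6, 0.7], val_recall=[0.4, 0.5, 0.6])
    print(H.history['val_loss'])  # [1.0, 0.7, 0.5]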
Example #12
    def __init__(self, network, device=None):
        """Init

        :param network: pytorch network to train or evaluate
        :type network: torch.nn.Module
        :param device: device to train the network on, e.g. 'cuda:0'
        :type device: str
        """

        self.network = network
        self.device = device

        self._compiled = False
        # these are set in the `compile` method
        self.optimizer = None
        self.loss = None

        self.history = History()
        self.stop_training = False
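The trainer above stores a Keras History so downstream tooling can read trainer.history.history. One plausible way its (unshown) fit loop could feed the container, sketched purely as an assumption:

    def _log_epoch(self, epoch, train_loss, val_loss=None):
        # Assumed helper; not part of the original trainer.
        logs = {'loss': train_loss}
        if val_loss is not None:
            logs['val_loss'] = val_loss
        if epoch == 0:
            self.history.on_train_begin()
        self.history.on_epoch_end(epoch, logs)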
Example #13
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    fname = filename(options)

    data = CreateDataset()

    create_data_method = getattr(data, options.dataset)

    train_data, validation_data, params = create_data_method()

    history_DIR = 'model_history'
    data_DIR = os.path.join(history_DIR, options.dataset)
    experiment_DIR = os.path.join(data_DIR, fname)

    # os.makedirs creates the intermediate history/data directories as needed
    if not os.path.exists(experiment_DIR) and not options.debug:
        os.makedirs(experiment_DIR)

    kwargs = {
        "input_shape": params['input_shape'],
        "classes": params['num_classes'],
        "is_five_layers": options.is_five_layers,
        "activation": options.activation,
        "is_normalized": options.is_normalized
    }

    model = fully_connected_neural_net(**kwargs)

    p_model = model_placement(model=model, num_gpus=options.num_gpus)

    p_model.compile(loss='categorical_crossentropy',
                    metrics=['accuracy'],
                    optimizer=SGD(lr=options.lr))

    p_model.summary()

    num_epochs = params['num_epochs']

    if options.dataset == 'shapeset':
        steps_per_epoch = params['steps_per_epoch']

        history = History()
        GAStore = GradientActivationStore(DIR=experiment_DIR,
                                          num_classes=params['num_classes'],
                                          record_every=1,
                                          only_weights=True)

        callbacks = [history, GAStore]

        if options.debug:
            num_epochs = 5
            steps_per_epoch = 1000
            callbacks = None

        p_model.fit_generator(generator=train_data,
                              steps_per_epoch=steps_per_epoch,
                              epochs=num_epochs,
                              validation_data=validation_data,
                              callbacks=callbacks)
    else:
        x, y = train_data
        history = History()
        callbacks = [history]

        if options.debug:
            num_epochs = 5
            x = x[:1000]
            y = y[:1000]
            callbacks = None

        p_model.fit(x=x,
                    y=y,
                    batch_size=options.batch_size,
                    epochs=num_epochs,
                    callbacks=callbacks,
                    validation_data=validation_data)

    if not options.debug:
        dd.io.save(os.path.join(experiment_DIR, 'history.h5'), history.history)
Example #14
    def fit_hrl(self,
                env,
                nb_steps,
                random_start_step_policy,
                callbacks=None,
                verbose=1,
                visualize=False,
                pre_warm_steps=0,
                log_interval=100,
                save_interval=1,
                nb_max_episode_steps=None):

        if not self.compiled:
            raise RuntimeError(
                'You tried to fit your agent but it hasn\'t been'
                ' compiled yet. Please call `compile()` before `fit()`.')

        self.training = True
        self.turn_left_agent.training = True
        self.go_straight_agent.training = True
        self.turn_right_agent.training = True

        callbacks = [] if not callbacks else callbacks[:]

        if verbose == 1:
            callbacks += [TrainIntervalLogger(interval=log_interval)]
        elif verbose > 1:
            callbacks += [TrainEpisodeLogger()]
        if visualize:
            callbacks += [Visualizer()]

        parent_dir = os.path.dirname(os.path.dirname(__file__))
        callbacks += [FileLogger(filepath=parent_dir + os.sep + 'log.json')]
        callbacks += [
            ModelIntervalCheckpoint(filepath=parent_dir +
                                    '/checkpoints/model_step{step}.h5f',
                                    interval=save_interval,
                                    verbose=1)
        ]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        callbacks.set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_steps': nb_steps,
        }
        callbacks.set_params(params)
        self._on_train_begin()
        callbacks.on_train_begin()

        episode = np.int16(0)
        self.step = np.int16(0)
        self.turn_left_agent.step = np.int16(0)
        self.go_straight_agent.step = np.int16(0)
        self.turn_right_agent.step = np.int16(0)
        observation = env.encoded_obs
        episode_reward = None
        episode_step = None
        did_abort = False

        # warm steps
        print('pre warming up:')
        for _ in range(pre_warm_steps):
            normed_action = random_start_step_policy()
            recent_action = normed_action
            recent_observation = observation  # put in normed action and unprocessed observation
            action = self.processor.process_action(
                recent_action)  # [0/1/2, goal_delta_x, acc]

            callbacks.on_action_begin(action)
            observation, reward, done, info = env.step(action)
            observation = deepcopy(observation)
            if self.processor is not None:
                observation, reward, done, info = self.processor.process_step(
                    observation, reward, done, info)
            callbacks.on_action_end(action)

            self.memory.append(recent_observation,
                               recent_action[0],
                               reward,
                               done,
                               training=self.training)
            if recent_action[0] == 0:
                left_obs = np.column_stack(
                    (recent_observation[:, :30], recent_observation[:, -8:],
                     np.tile(
                         np.array([1, 0, 0]),
                         (recent_observation.shape[0], 1))))  # 30 + 8 + 3 = 41
                lower_action = recent_action[1:]
                self.turn_left_agent.memory.append(left_obs,
                                                   lower_action,
                                                   reward,
                                                   1,
                                                   training=self.training)
            elif recent_action[0] == 1:
                straight_obs = np.column_stack(
                    (deepcopy(recent_observation),
                     np.tile(np.array([0, 1, 0]),
                             (recent_observation.shape[0], 1))))  # 56 + 3 = 59
                lower_action = recent_action[1:]
                self.go_straight_agent.memory.append(straight_obs,
                                                     lower_action,
                                                     reward,
                                                     1,
                                                     training=self.training)
            else:
                right_obs = np.column_stack(
                    (recent_observation[:, 18:],
                     np.tile(
                         np.array([0, 0, 1]),
                         (recent_observation.shape[0], 1))))  # 56- 18 + 3 = 41
                lower_action = recent_action[1:]
                self.turn_right_agent.memory.append(right_obs,
                                                    lower_action,
                                                    reward,
                                                    1,
                                                    training=self.training)
            print('————————————————————————————————————————')
            print({
                'upper_memory_len: ': self.memory.nb_entries,
                'left_memory_len: ': self.turn_left_agent.memory.nb_entries,
                'straight_memory_len: ':
                self.go_straight_agent.memory.nb_entries,
                'right_memory_len: ': self.turn_right_agent.memory.nb_entries
            })
            print('————————————————————————————————————————')
            # TODO: one stored point is always marked not done, but there would be only one such bad point in the buffer
            if done:

                def random_init_state(flag=True):
                    init_state = [-800, -150 - 3.75 * 5 / 2, 5, 0]
                    if flag:
                        x = np.random.random() * 1000 - 800
                        lane = np.random.choice([0, 1, 2, 3])
                        y_fn = lambda lane: \
                        [-150 - 3.75 * 7 / 2, -150 - 3.75 * 5 / 2, -150 - 3.75 * 3 / 2, -150 - 3.75 * 1 / 2][lane]
                        y = y_fn(lane)
                        v = np.random.random() * 25
                        heading = 0
                        init_state = [x, y, v, heading]
                    return init_state

                observation = deepcopy(
                    env.reset(init_state=random_init_state(flag=True)))
                if self.processor is not None:
                    observation = self.processor.process_observation(
                        observation)

        observation = None

        try:
            while self.step < nb_steps:
                if observation is None:  # start of a new episode
                    callbacks.on_episode_begin(episode)
                    episode_step = np.int16(0)
                    episode_reward = np.float32(0)

                    # Obtain the initial observation by resetting the environment.
                    self.reset_states()

                    def random_init_state(flag=True):
                        init_state = [-800, -150 - 3.75 * 5 / 2, 5, 0]
                        if flag:
                            x = np.random.uniform(0, 1) * 1000 - 800
                            lane = np.random.choice([0, 1, 2, 3])
                            y_fn = lambda lane: [
                                -150 - 3.75 * 7 / 2, -150 - 3.75 * 5 / 2, -150
                                - 3.75 * 3 / 2, -150 - 3.75 * 1 / 2
                            ][lane]
                            y = y_fn(lane)
                            v = np.random.uniform(0, 1) * 25
                            heading = 0
                            init_state = [x, y, v, heading]
                        return init_state

                    observation = deepcopy(
                        env.reset(init_state=random_init_state()))
                    if self.processor is not None:
                        observation = self.processor.process_observation(
                            observation)
                    assert observation is not None

                # At this point, we expect to be fully initialized.
                assert episode_reward is not None
                assert episode_step is not None
                assert observation is not None

                # Run a single step.
                callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
                # (forward step) and then use the reward to improve (backward step).
                action = self.forward(observation)  # this is normed action
                action = self.processor.process_action(
                    action)  # this is processed action for env
                done = False

                callbacks.on_action_begin(action)
                observation, reward, done, info = env.step(action)
                observation = deepcopy(observation)
                if self.processor is not None:
                    observation, reward, done, info = self.processor.process_step(
                        observation, reward, done, info)
                callbacks.on_action_end(action)

                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    # Force a terminal state.
                    done = True
                metrics = self.backward(reward, terminal=done)
                episode_reward += reward
                step_logs = {
                    'action': action,  # processed action
                    'observation': observation,  # true obs
                    'reward': reward,
                    'metrics': metrics,
                    'episode': episode
                    # 'info': info,
                }

                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1
                self.turn_left_agent.step += 1
                self.go_straight_agent.step += 1
                self.turn_right_agent.step += 1

                memory_len = [
                    self.turn_left_agent.memory.nb_entries,
                    self.go_straight_agent.memory.nb_entries,
                    self.turn_right_agent.memory.nb_entries
                ]

                if done:
                    episode_logs = {
                        'episode_reward': episode_reward,
                        'nb_episode_steps': episode_step,
                        'nb_steps': self.step,
                        'memory_len': memory_len
                    }
                    callbacks.on_episode_end(episode, episode_logs)

                    episode += 1
                    observation = None
                    episode_step = None
                    episode_reward = None
        except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be safely aborted.
            # This is so common that we've built this right into this function, which ensures that
            # the `on_train_end` method is properly called.
            did_abort = True
        callbacks.on_train_end(logs={'did_abort': did_abort})
        self._on_train_end()

        return history
Example #15
    def test_hrl(self,
                 env,
                 nb_episodes=1,
                 callbacks=None,
                 visualize=True,
                 nb_max_episode_steps=None,
                 verbose=2,
                 model_path=None):

        if model_path is not None:
            self.load_weights(model_path)
        if not self.compiled:
            raise RuntimeError(
                'You tried to test your agent but it hasn\'t been '
                'compiled yet. Please call `compile()` before `test()`.')

        self.training = False
        self.turn_left_agent.training = False
        self.go_straight_agent.training = False
        self.turn_right_agent.training = False
        self.step = np.int16(0)
        self.turn_left_agent.step = np.int16(0)
        self.go_straight_agent.step = np.int16(0)
        self.turn_right_agent.step = np.int16(0)

        callbacks = [] if not callbacks else callbacks[:]

        if verbose >= 1:
            callbacks += [TestLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        callbacks.set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_episodes': nb_episodes,
        }
        callbacks.set_params(params)
        self._on_test_begin()
        callbacks.on_train_begin()
        for episode in range(nb_episodes):
            callbacks.on_episode_begin(episode)
            episode_reward = 0.
            episode_step = 0

            # Obtain the initial observation by resetting the environment.
            self.reset_states()

            def random_init_state(flag=True):
                init_state = [-800, -150 - 3.75 * 5 / 2, 5, 0]
                if flag:
                    x = np.random.random() * 1000 - 800
                    lane = np.random.choice([0, 1, 2, 3])
                    y_fn = lambda lane: \
                    [-150 - 3.75 * 7 / 2, -150 - 3.75 * 5 / 2, -150 - 3.75 * 3 / 2, -150 - 3.75 * 1 / 2][lane]
                    y = y_fn(lane)
                    v = np.random.random() * 25
                    heading = 0
                    init_state = [x, y, v, heading]
                return init_state

            observation = deepcopy(
                env.reset(init_state=random_init_state(flag=True)))
            assert observation is not None

            # Run the episode until we're done.
            done = False
            while not done:
                callbacks.on_step_begin(episode_step)

                action = self.forward(observation)
                action = self.processor.process_action(action)
                reward = 0.
                callbacks.on_action_begin(action)
                observation, reward, done, info = env.step(action)
                observation = deepcopy(observation)
                callbacks.on_action_end(action)
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    done = True
                self.backward(reward, terminal=done)
                episode_reward += reward

                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'episode': episode
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1
                self.turn_left_agent.step += 1
                self.go_straight_agent.step += 1
                self.turn_right_agent.step += 1

            # We are in a terminal state but the agent hasn't yet seen it. We therefore
            # perform one more forward-backward call and simply ignore the action before
            # resetting the environment. We need to pass in `terminal=False` here since
            # the *next* state, that is the state of the newly reset environment, is
            # always non-terminal by convention.
            self.forward(observation)
            self.backward(0., terminal=False)

            # Report end of episode.
            episode_logs = {
                'episode_reward': episode_reward,
                'nb_steps': episode_step,
            }
            callbacks.on_episode_end(episode, episode_logs)
        callbacks.on_train_end()
        self._on_test_end()

        return history
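Typical usage of this test loop, assuming a compiled hierarchical agent and reusing the checkpoint naming from fit_hrl above:

    history = agent.test_hrl(env, nb_episodes=10, visualize=False,
                             model_path='checkpoints/model_step10000.h5f')
    print(history.history['episode_reward'])  # recorded via on_episode_end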
Example #16
def CAC(input_shape=(1, 1024, 4)):

    input_layer = Input(shape=input_shape)

    x = Conv2D(32,
               15,
               strides=4,
               padding='same',
               activation='relu',
               name='conv1',
               input_shape=input_shape)(input_layer)

    x = Conv2D(64,
               15,
               strides=4,
               padding='same',
               activation='relu',
               name='conv2',
               input_shape=input_shape)(x)

    x = Conv2D(128,
               11,
               strides=4,
               padding='same',
               activation='relu',
               name='conv3',
               input_shape=input_shape)(x)

    x = Flatten()(x)

    encoded = Dense(units=10, name='embedding')(x)

    ###

    x = Dense(units=2048, activation='relu')(encoded)

    x = Reshape((1, 16, 128))(x)

    x = Conv2DTranspose(64,
                        15,
                        strides=(1, 4),
                        padding='same',
                        activation='relu',
                        name='deconv3')(x)

    x = Conv2DTranspose(32,
                        15,
                        strides=(1, 4),
                        padding='same',
                        activation='relu',
                        name='deconv2')(x)

    decoded = Conv2DTranspose(4,
                              15,
                              strides=(1, 4),
                              padding='same',
                              name='deconv1')(x)

    ###

    autoencoder = Model(input_layer, decoded)

    autoencoder.summary()

    encoder = Model(input_layer, encoded, name='encoder')

    encoder.summary()

    simutation_parameters = {
        "PWM_file_1": "./MA0835.1.jaspar",
        "PWM_file_2": "./MA0515.1.jaspar",
        "seq_length": 1024,
        "center_pos": 50,
        "interspace": 10
    }

    [train_X, train_Y, test_X,
     test_Y] = get_simulated_dataset(parameters=simutation_parameters,
                                     train_size=20000,
                                     test_size=5000)

    print(train_X.shape)

    #################################
    # build autoencoder model

    autoencoder.compile(optimizer='adam', loss='mse')

    history_autoencoder = autoencoder.fit(x=train_X,
                                          y=train_X,
                                          batch_size=64,
                                          epochs=10,
                                          verbose=1,
                                          callbacks=[History()],
                                          validation_data=(test_X, test_X))

    encoded_imgs = encoder.predict(test_X)

    print(encoded_imgs.shape)

    colors = ['#e41a1c', '#377eb8', '#4daf4a']

    X_embedded = TSNE(n_components=2).fit_transform(encoded_imgs)

    plt.scatter(X_embedded[:, 0],
                X_embedded[:, 1],
                c=np.array(colors)[test_Y.flatten()])
    plt.colorbar()
    plt.show()
Example #17
def CAC_2(input_shape=(1, 1024, 4)):

    model = Sequential()

    model.add(
        Conv2D(10,
               11,
               strides=1,
               padding='same',
               activation='relu',
               name='conv1',
               input_shape=input_shape))

    #model.add(MaxPooling2D(pool_size=(1,4)))

    model.add(Conv2D(10, 11, strides=1, padding='same', activation='relu'))

    model.add(MaxPooling2D(pool_size=(1, 64)))

    model.add(Conv2D(10, 11, strides=1, padding='same', activation='relu'))

    model.add(MaxPooling2D(pool_size=(1, 4)))

    model.add(Flatten())

    model.add(Dense(units=10))

    model.add(Dense(units=80, activation='relu'))

    model.add(Reshape((1, 16, 5)))

    model.add(UpSampling2D(size=(1, 4)))

    model.add(
        Conv2D(5,
               11,
               strides=(1, 4),
               padding='same',
               activation='relu',
               name='deconv3'))

    #model.add(UpSampling2D(size=(1,4)))

    #model.add(  Conv2DTranspose(5, 11, strides=(1,4), padding='same', activation='relu') )

    #model.add(UpSampling2D(size=(1,4)))

    #model.add(Conv2DTranspose(4, 11, strides=(1,4), padding='same', activation='relu'))

    model.summary()

    #return 0

    input_layer = Input(shape=input_shape)

    x = Conv2D(10,
               11,
               strides=1,
               padding='same',
               activation='relu',
               input_shape=input_shape)(input_layer)
    #x=BatchNormalization(axis= -1)(x)
    #x=Activation('relu')(x)
    #x=MaxPooling2D(pool_size=(1,4))(x)

    x = Conv2D(10, 11, strides=1, padding='same', activation='relu')(x)
    #x=BatchNormalization(axis= -1)(x)
    #x=Activation('relu')(x)
    x = MaxPooling2D(pool_size=(1, 64))(x)

    #x = Conv2D(5, 11, strides=1, padding='same', activation='relu', input_shape=input_shape)(x)
    #x=BatchNormalization(axis= -1)(x)
    #x=Activation('relu')(x)
    #x=MaxPooling2D(pool_size=(1,4))(x)

    x = Flatten()(x)

    encoded = Dense(units=10)(x)

    x = Dense(units=40, activation='relu')(encoded)

    x = Reshape((1, 16, 10))(x)

    #x=UpSampling2D(size=(1,4))(x)

    x = Conv2DTranspose(10,
                        11,
                        strides=(1, 64),
                        padding='same',
                        activation='relu')(x)

    #x=UpSampling2D(size=(1,4))(x)

    #x = Conv2DTranspose(5, 11, strides=(1,1), padding='same', activation='relu')(x)

    #x=UpSampling2D(size=(1,4))(x)

    #x = Conv2DTranspose(5, 11, strides=(1,1), padding='same', activation='relu')(x)

    decoded = Conv2DTranspose(4,
                              11,
                              strides=(1, 1),
                              padding='same',
                              activation='sigmoid')(x)

    autoencoder = Model(input_layer, decoded)

    autoencoder.summary()

    encoder = Model(input_layer, encoded, name='encoder')
    '''
    input_layer = Input(shape=input_shape)

    x = Conv2D(5, 11, strides=4, padding='same', activation='relu', name='conv1', input_shape=input_shape)(input_layer)

    x = Conv2D(64, 15, strides=4, padding='same', activation='relu', name='conv2', input_shape=input_shape)(x)

    x = Conv2D(128, 11, strides=4, padding='same', activation='relu', name='conv3', input_shape=input_shape)(x)

    x = Flatten()(x)

    encoded = Dense(units=10, name='embedding')(x)

    ###

    x = Dense(units=2048, activation='relu')(encoded)

    x = Reshape(  (1, 16, 128)   )(x)

    x = Conv2DTranspose(64, 15, strides=(1,4), padding='same', activation='relu', name='deconv3')(x)

    x = Conv2DTranspose(32, 15, strides=(1,4), padding='same', activation='relu', name='deconv2')(x)

    decoded = Conv2DTranspose(4, 15, strides=(1,4), padding='same', name='deconv1')(x)

    ###

    autoencoder = Model(input_layer, decoded)

    autoencoder.summary()

    encoder = Model(input_layer, encoded, name='encoder')

    encoder.summary()
    '''

    simutation_parameters = {
        "PWM_file_1": "./JASPAR/MA0835.1.jaspar",
        "PWM_file_2": "./JASPAR/MA0515.1.jaspar",
        "seq_length": 1024,
        "center_pos": 100,
        "interspace": 10
    }

    [train_X, train_Y, test_X,
     test_Y] = get_simulated_dataset(parameters=simutation_parameters,
                                     train_size=20000,
                                     test_size=5000)

    print(train_X.shape)

    #################################
    # build autoencoder model

    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

    history_autoencoder = autoencoder.fit(x=train_X,
                                          y=train_X,
                                          batch_size=32,
                                          epochs=13,
                                          verbose=1,
                                          callbacks=[History()],
                                          validation_data=(test_X, test_X))

    encoded_imgs = encoder.predict(test_X)

    print(encoded_imgs.shape)

    colors = ['#e41a1c', '#377eb8', '#4daf4a']

    X_embedded = TSNE(n_components=2).fit_transform(encoded_imgs)

    plt.scatter(X_embedded[:, 0],
                X_embedded[:, 1],
                c=np.array(colors)[test_Y.flatten()])
    plt.colorbar()
    plt.show()
Example #18
def example_generator():

    separate_dataset("True_target_with_labels_128.bed", ["chr1"], "valid.bed")
    separate_dataset("True_target_with_labels_128.bed", ["chr2", "chr19"],
                     "test.bed")
    separate_dataset("True_target_with_labels_128.bed", [
        "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10",
        "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18",
        "chr20", "chr21", "chr22"
    ], "train.bed")

    train_gen = DataGenerator(
        data_path="train.bed",
        ref_fasta=
        "../GSM1865005_allC.MethylC-seq_WT_rods_rep1.tsv/GRCm38.primary_assembly.genome.fa.gz",
        genome_size_file="./mm10.genome.size",
        epi_track_files=None,
        tasks=["TARGET"],
        upsample=False)

    valid_gen = DataGenerator(
        data_path="valid.bed",
        ref_fasta=
        "../GSM1865005_allC.MethylC-seq_WT_rods_rep1.tsv/GRCm38.primary_assembly.genome.fa.gz",
        genome_size_file="./mm10.genome.size",
        epi_track_files=None,
        tasks=["TARGET"],
        upsample=False)

    #model = initialize_model()
    # add functional models here

    input_shape = (1, 128, 4)
    input_layer = Input(shape=input_shape)
    x = Conv2D(40, 11, strides=1, padding='same',
               input_shape=input_shape)(input_layer)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D(pool_size=(1, 32))(x)

    encoded = Flatten()(x)

    x = Reshape((1, 4, 40))(encoded)

    x = UpSampling2D(size=(1, 32))(x)

    decoded = Conv2D(4, 11, padding='same', activation='sigmoid')(x)

    ###

    autoencoder = Model(input_layer, decoded)

    autoencoder.summary()

    encoder = Model(input_layer, encoded, name='encoder')

    encoder.summary()

    autoencoder.compile(optimizer='adam', loss='mse')
    encoder.compile(optimizer='adam', loss='mse')

    trainning_history = autoencoder.fit_generator(
        train_gen,
        validation_data=valid_gen,
        #steps_per_epoch=5000,
        #validation_steps=500,
        epochs=600,
        verbose=1,
        use_multiprocessing=True,
        workers=6,
        max_queue_size=200,
        callbacks=[
            History(),
            ModelCheckpoint("ATAC_peak_autoencoder_32.h5",
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            mode='min',
                            save_weights_only=False),
            CustomCheckpoint('ATAC_peak_encoder_32.h5', encoder)
        ])
Example #19
    def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
            visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
            nb_max_episode_steps=None):
        """Trains the agent on the given environment.

        # Arguments
            env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
            nb_steps (integer): Number of training steps to be performed.
            action_repetition (integer): Number of times the agent repeats the same action without
                observing the environment again. Setting this to a value > 1 can be useful
                if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during training. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
            visualize (boolean): If `True`, the environment is visualized during training. However,
                this is likely going to slow down training significantly and is thus intended to be
                a debugging instrument.
            nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
                of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
                at the beginning of each episode.
            start_step_policy (`lambda observation: action`): The policy
                to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
            log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
            nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
                automatically resetting the environment. Set to `None` if each episode should run
                (potentially indefinitely) until the environment signals a terminal state.

        # Returns
            A `keras.callbacks.History` instance that recorded the entire training process.
        """
        if not self.compiled:
            raise RuntimeError('You tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))

        self.training = True

        callbacks = [] if not callbacks else callbacks[:]

        if verbose == 1:
            callbacks += [TrainIntervalLogger(interval=log_interval)]
        elif verbose > 1:
            callbacks += [TrainEpisodeLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_steps': nb_steps,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)
        self._on_train_begin()
        callbacks.on_train_begin()

        episode = np.int16(0)
        self.step = np.int16(0)
        observation = None
        episode_reward = None
        episode_step = None
        did_abort = False
        try:
            while self.step < nb_steps:
                if observation is None:  # start of a new episode
                    callbacks.on_episode_begin(episode)
                    episode_step = np.int16(0)
                    episode_reward = np.float32(0)

                    # Obtain the initial observation by resetting the environment.
                    self.reset_states()
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(observation)
                    assert observation is not None

                    # Perform random starts at beginning of episode and do not record them into the experience.
                    # This slightly changes the start position between games.
                    nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
                    for _ in range(nb_random_start_steps):
                        if start_step_policy is None:
                            action = env.action_space.sample()
                        else:
                            action = start_step_policy(observation)
                        if self.processor is not None:
                            action = self.processor.process_action(action)
                        callbacks.on_action_begin(action)
                        observation, reward, done, info = env.step(action)
                        observation = deepcopy(observation)
                        if self.processor is not None:
                            observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
                        callbacks.on_action_end(action)
                        if done:
                            warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
                            observation = deepcopy(env.reset())
                            if self.processor is not None:
                                observation = self.processor.process_observation(observation)
                            break

                # At this point, we expect to be fully initialized.
                assert episode_reward is not None
                assert episode_step is not None
                assert observation is not None

                # Run a single step.
                callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
                # (forward step) and then use the reward to improve (backward step).
                action = self.forward(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = np.float32(0)
                accumulated_info = {}
                done = False
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, done, info = env.step(action)
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, done, info = self.processor.process_step(observation, r, done, info)
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    callbacks.on_action_end(action)
                    reward += r
                    if done:
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    # Force a terminal state.
                    done = True
                metrics = self.backward(reward, terminal=done)
                episode_reward += reward

                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'metrics': metrics,
                    'episode': episode,
                    'info': accumulated_info,
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1

                if done:
                    # We are in a terminal state but the agent hasn't yet seen it. We therefore
                    # perform one more forward-backward call and simply ignore the action before
                    # resetting the environment. We need to pass in `terminal=False` here since
                    # the *next* state, that is the state of the newly reset environment, is
                    # always non-terminal by convention.
                    self.forward(observation)
                    self.backward(0., terminal=False)

                    # This episode is finished, report and reset.
                    episode_logs = {
                        'episode_reward': episode_reward,
                        'nb_episode_steps': episode_step,
                        'nb_steps': self.step,
                    }
                    callbacks.on_episode_end(episode, episode_logs)

                    episode += 1
                    observation = None
                    episode_step = None
                    episode_reward = None
        except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be safely aborted.
            # This is so common that we've built this right into this function, which ensures that
            # the `on_train_end` method is properly called.
            did_abort = True
        callbacks.on_train_end(logs={'did_abort': did_abort})
        self._on_train_end()

        return history
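This is essentially keras-rl's Agent.fit; the History it returns accumulates the episode_logs emitted at each on_episode_end. Typical usage with a Gym environment, assuming a compiled agent such as a DQNAgent:

    history = dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)
    print(history.history['episode_reward'])    # one entry per completed episode
    print(history.history['nb_episode_steps'])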
Example #20
def example_1():

    simutation_parameters = {
        "PWM_file":
        "/home/qan/Desktop/DeepEpitif/DeepMetif/JASPAR2018_CORE_vertebrates_non-redundant_pfms_jaspar/MA0835.1.jaspar",
        "seq_length": 100,
        "center_pos": 20,
        "motif_width": 14,
        "metif_level": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    }

    [train_X, train_Y, valid_X, valid_Y, test_X,
     test_Y] = get_simulated_dataset(parameters=simutation_parameters,
                                     train_size=16000,
                                     valid_size=2000,
                                     test_size=20)

    #print(train_X.dtype)
    #print(train_Y.dtype)
    #print(train_X[2,:,:,:])
    #print(train_Y)
    #print(train_X.shape[1::])

    #exit()
    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=5,
               kernel_size=(1, 15),
               padding="same",
               input_shape=train_X.shape[1::]))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 35)))
    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))
    one_filter_keras_model.summary()

    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy')

    metrics_callback = MetricsCallback(train_data=(train_X, train_Y),
                                       validation_data=(valid_X, valid_Y))

    print(one_filter_keras_model.get_weights())

    history_one_filter = one_filter_keras_model.fit(
        x=train_X,
        y=train_Y,
        batch_size=10,
        epochs=50,
        verbose=1,
        callbacks=[History(), metrics_callback],
        validation_data=(valid_X, valid_Y))
    #print(one_filter_keras_model.get_weights())

    one_filter_keras_model_json = one_filter_keras_model.to_json()
    with open("one_filter_keras_model.json", "w") as json_file:
        json_file.write(one_filter_keras_model_json)

    one_filter_keras_model.save_weights("one_filter_keras_model.h5")
    print("Saved model to disk")
Пример #21
0
def runTrainingClassification(uuid,
                              datasetDir,
                              validDir,
                              classNum,
                              dropoutValue=0.2,
                              batch_size=128,
                              nb_epoch=20,
                              step_size_train=10,
                              alphaVal=0.75,
                              depthMul=1):

    imageGen = ImageDataGenerator(rotation_range=30,
                                  width_shift_range=0.35,
                                  height_shift_range=0.35,
                                  zoom_range=0.35,
                                  shear_range=0.35,
                                  vertical_flip=False,
                                  horizontal_flip=False,
                                  brightness_range=[0.65, 1.35],
                                  rescale=1. / 255)

    trainSet = imageGen.flow_from_directory(datasetDir,
                                            target_size=(224, 224),
                                            color_mode='rgb',
                                            batch_size=batch_size,
                                            class_mode='categorical',
                                            shuffle=True)
    validSet = imageGen.flow_from_directory(validDir,
                                            target_size=(224, 224),
                                            color_mode='rgb',
                                            batch_size=32,
                                            class_mode='categorical',
                                            shuffle=True)

    class EarlyStoppingAtMinLoss(tf.keras.callbacks.Callback):
        def __init__(self, patience=3):
            super(EarlyStoppingAtMinLoss, self).__init__()
            self.patience = patience
            self.best_weights = None

        def on_train_begin(self, logs=None):
            self.wait = 0
            self.stopped_epoch = 0
            self.best = np.Inf
            self.last_acc = 0
            self.atleastepoc = 0

        def on_epoch_end(self, epoch, logs=None):
            current = logs.get('val_loss')
            val_acc = logs.get('val_acc')
            self.atleastepoc = self.atleastepoc + 1
            if np.less(current, self.best
                       ) or self.last_acc < 0.95 or self.atleastepoc < 25:
                self.best = current
                self.wait = 0
                self.last_acc = val_acc
                self.best_weights = self.model.get_weights()
            else:
                self.wait += 1
                if self.wait >= self.patience:
                    self.stopped_epoch = epoch
                    self.model.stop_training = True
                    print(
                        '\nRestoring model weights from the end of the best epoch.'
                    )
                    self.model.set_weights(self.best_weights)

        def on_train_end(self, logs=None):
            if self.stopped_epoch > 0:
                print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

    base_model = tf.keras.applications.MobileNet(input_shape=(224, 224, 3),
                                                 alpha=alphaVal,
                                                 depth_multiplier=depthMul,
                                                 dropout=dropoutValue,
                                                 pooling='avg',
                                                 include_top=False,
                                                 weights="imagenet",
                                                 classes=classNum)

    mbnetModel = Sequential([
        base_model,
        Dropout(dropoutValue, name='dropout'),
        Dense(classNum, activation='softmax')
    ])

    if classNum == 2:
        mbnetModel.compile(loss='binary_crossentropy',
                           optimizer=RAdam(),
                           metrics=['accuracy'])
    else:
        mbnetModel.compile(
            loss=
            'categorical_crossentropy',  #loss_softmax_cross_entropy_with_logits_v2,
            optimizer=RAdam(),
            metrics=['accuracy'])

    history = History()

    try:
        mbnetModel.fit_generator(generator=trainSet,
                                 steps_per_epoch=step_size_train,
                                 callbacks=[EarlyStoppingAtMinLoss(), history],
                                 epochs=50,
                                 validation_data=validSet)
    except Exception as e:
        return (-14, f'Unexpected Error Found During Triaining, {e}')

    mbnetModel.save(f'{localSSDLoc}trained_h5_file/{uuid}_mbnet10.h5')

    converter = tf.lite.TFLiteConverter.from_keras_model_file(
        f'{localSSDLoc}trained_h5_file/{uuid}_mbnet10.h5',
        custom_objects={
            'RAdam':
            RAdam,
            'loss_softmax_cross_entropy_with_logits_v2':
            loss_softmax_cross_entropy_with_logits_v2
        })
    tflite_model = converter.convert()
    open(f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet10_quant.tflite',
         "wb").write(tflite_model)

    subprocess.run([
        f'{nncaseLoc}/ncc',
        f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet10_quant.tflite',
        f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel', '-i',
        'tflite', '-o', 'k210model', '--dataset', validDir
    ])

    if os.path.isfile(
            f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel'):
        return (
            0, f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel',
            history, validSet, mbnetModel)
    else:
        return (-16,
                'Unexpected Error Found During generating Kendryte k210model.')
Пример #22
0
# -----------------------------------
# COMPILE MODEL AND SET UP CALLBACKS |
# -----------------------------------
# always compile model AFTER layers have been frozen
recall = tf.keras.metrics.Recall()
precision = tf.keras.metrics.Precision()
validation_output_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end=make_pred_output_callback(
    model,
    validation_generator,
    hprm['BATCH_SIZE']))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc',
                                                                        recall,
                                                                        precision])

history = History()
npt_monitor = NeptuneMonitor(hprm['BATCH_SIZE'])
# ---------------------------------------------------------------------------------------------------------------------


# ---------------------------------------------
# TRAIN TOP LAYERS ON NEW DATA FOR A FEW EPOCHS|
# ---------------------------------------------
post_training_model = model.fit_generator(training_generator,
                                          steps_per_epoch=((hprm['TRAIN_SIZE'] // hprm['BATCH_SIZE'])+1),
                                          epochs=10,  # number of epochs, training cycles
                                          validation_data=validation_generator,  # performance eval on test set
                                          validation_steps=((hprm['TEST_SIZE'] // hprm['BATCH_SIZE'])+1),
                                          verbose=1,
                                          callbacks=[history,
                                                     npt_monitor])
Пример #23
0
RMSE = round(mean_squared_error(y_test, y_pred, squared=False), 4)
print('RMSE: ', RMSE)

r2 = round(r2_score(y_test, y_pred), 4)
print('R squared: ', r2)

results.append(['XGBoost Tuned', r2, RMSE])
"""## Neural Network
Since the size of our dataset is not that big, we'll start with a very simple neural network. 2 hidden layers of 5 nodes, with relu activation. 
We'll use an early stopping mechanism that looks at the loss on the validation data. If it doesn't improve in 5 epochs, it will stop.
"""

#Build Neural Network
n_cols = x_train.shape[1]
hist = History()

model = Sequential()

model.add(Dense(5, activation='relu', input_dim=n_cols))
model.add(Dense(5, activation='relu'))

model.add(Dense(1))

model.compile(loss='mean_squared_error', optimizer='adam')

es_callback = EarlyStopping(monitor='val_loss', patience=5)

hist = model.fit(x_train,
                 y_train,
                 epochs=100,