Example #1
def mnist_dnn2():
    mnist = keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data(os.path.join(root_path, "data", "mnist", "mnist.npz"))
    x_train, x_test = x_train / 255.0, x_test / 255.0

    x_train = np.reshape(x_train, [-1, 28, 28])
    x_test = np.reshape(x_test, [-1, 28, 28])
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(128, input_shape=(28, 28)))
    model.add(keras.layers.Activation("relu"))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Dense(64))
    model.add(keras.layers.Activation("relu"))
    model.add(keras.layers.Dropout(0.2))

    # model.add(keras.layers.Flatten())
    model.add(keras.layers.Reshape((-1,)))
    model.add(keras.layers.Dense(10))
    model.add(keras.layers.Activation("softmax"))

    model.summary()

    model.compile(optimizer=keras.optimizers.RMSprop(lr=0.001), loss="categorical_crossentropy", metrics=["accuracy"])

    model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1, validation_split=0.2)

    test_loss, test_accuracy = model.evaluate(x_test, y_test, batch_size=32, verbose=1)
    logger.info("\ntest_loss:{0},test_accuracy:{1}".format(test_loss, test_accuracy))
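A minimal NumPy-only sketch (shapes implied by the model above) of why the Reshape((-1,)) layer, like the commented-out Flatten, is needed: Dense applied to (28, 28) inputs acts only on the last axis, so after the second block each sample is still a 28 x 64 matrix that must be flattened before the 10-way softmax.

import numpy as np

h = np.zeros((32, 28, 64))              # a batch of 32 samples after Dense(64)
flat = np.reshape(h, (h.shape[0], -1))  # what Reshape((-1,)) / Flatten produce
print(flat.shape)                       # (32, 1792), the input to Dense(10)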
Example #2
    def update(self, state, action, nextState, reward):
        """
          The parent class calls this to observe a
          state = action => nextState and reward transition.
          You should do your Q-Value update here

          NOTE: You should never call this function,
          it will be called on your behalf
        """
        "*** YOUR CODE HERE ***"
        self.nextImage = getFrame()
        self.nextImage = np.array(self.nextImage)
        self.nextImage = resize(self.nextImage,
                                (self.frame_width, self.frame_height))
        self.nextImage = np.reshape(
            self.nextImage, [1, self.frame_height, self.frame_width, 3])
        self.nextImage = np.uint8(self.nextImage)
        # new code: a state with no legal actions is terminal
        if not self.getLegalActions(state):
            done = True
        else:
            done = False

        # state_matrix = self.getStateMatrices(state)
        # nextState_matrix = self.getStateMatrices(nextState)
        # state_matrix = np.reshape(state_matrix, [1, self.state_size])
        # nextState_matrix = np.reshape(nextState_matrix, [1, self.state_size])

        self.remember(self.image, action, reward, self.nextImage, done)

        # train the neural network only while we are still in training
        #if self.episodesSoFar < self.numTraining:
        if len(self.memory) > 2 * self.batch_size:
            self.replay(self.batch_size)
Example #3
    def process_frame(self, frame):
        if frame is None:
            return None
        frame = np.array(frame)
        frame = resize(frame, (self.frame_width, self.frame_height))
        frame = np.reshape(
            frame, [1, self.frame_height, self.frame_width, self.state_size])
        frame = 255 * frame
        frame = np.uint8(frame)
        return frame
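The reshape here only prepends a batch dimension so a single frame can be fed to a Keras model; note that np.reshape does not transpose axes, so it relies on the resized array already being laid out as (height, width, channels). A minimal sketch with made-up sizes standing in for frame_width, frame_height and state_size:

import numpy as np

frame = np.random.rand(84, 84, 3)            # (height, width, channels), values in [0, 1]
batched = np.reshape(frame, [1, 84, 84, 3])  # same data with a leading batch axis
assert (batched[0] == frame).all()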
Example #4
    def predict(self, data, number_of_steps):
        predictions = np.empty(shape=(number_of_steps,))
        data_shape = data.shape

        for i in range(predictions.shape[0]):
            predicted_value = self.model.predict(data)
            predictions[i] = predicted_value.item()
            # remove first element and add the prediction
            data = np.reshape(np.append(data[0][1:], predicted_value.item()), newshape=data_shape)
        return predictions
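predict above performs an iterative multi-step forecast: each new prediction is appended to the input window, the oldest value is dropped, and np.reshape restores the window to its original shape for the next call. A minimal sketch of that window update with a toy array:

import numpy as np

data = np.array([[1., 2., 3., 4.]])    # shape (1, 4): one window of length 4
predicted = 5.0
data = np.reshape(np.append(data[0][1:], predicted), newshape=data.shape)
print(data)                            # [[2. 3. 4. 5.]]
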
    def train(self,
              signals_group: SignalsGroup,
              clipnorm: float = 0.) -> History:
        if len(signals_group.signals_data) != self._signals_count:
            raise ValueError(
                f'The model can handle exactly {self._signals_count} signals'
            )

        print(f'Training the LSTM autoencoder for signal group '
              f'"{signals_group.name}" {signals_group.signals}...')

        x_train = np.column_stack([
            self._preprocess(signal)
            for signal in signals_group.signals_data.values()
        ])
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1],
                                       1))  # samples, sample_len, features

        optimizer = optimizers.Adam(clipnorm=clipnorm)
        self._model.compile(optimizer=optimizer, loss='mse')

        callbacks = [
            EarlyStopping('val_loss', patience=self.MIN_EPOCHS,
                          min_delta=0.05),
        ]
        if self._tensorboard_dir:
            os.makedirs(os.path.dirname(self._tensorboard_dir), exist_ok=True)
            callbacks.append(
                TensorBoard(
                    log_dir=self._get_tensorboard_logs_dir(signals_group.name),
                    batch_size=self.BATCH_SIZE,
                    histogram_freq=0,
                    write_graph=True,
                    write_grads=True,
                    write_images=True,
                ))

        history = self._model.fit(
            x_train,
            x_train,
            batch_size=self.BATCH_SIZE,
            epochs=self.EPOCHS,
            validation_split=self.VALIDATION_SPLIT,
            shuffle=True,
            callbacks=callbacks,
        )

        models_path = self._get_model_path(signals_group.name)
        os.makedirs(os.path.dirname(models_path), exist_ok=True)
        self._model.save_weights(models_path)
        print(f'Model saved to "{models_path}"')

        return history
def process_input_dataset(location):
    temp_array = np.load(location)
    temp_array = temp_array.reshape((temp_array.shape[0], factor, factor, 1))

    ret_array = temp_array[:, :, :, 0]
    # ret_array = np.append(ret_array, temp_array[:, :, :, 1], axis=0)
    # ret_array = np.append(ret_array, temp_array[:, :, :, 2], axis=0)
    ret_array = np.reshape(ret_array, (ret_array.shape[0],
                                       ret_array.shape[1],
                                       ret_array.shape[2],
                                       1))
    return ret_array
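Indexing channel 0 drops the trailing axis, so the final np.reshape only restores the singleton channel dimension; the commented-out lines show how the remaining channels would be stacked along the batch axis for multi-channel data. A minimal sketch of that shape round-trip (with a made-up size in place of factor):

import numpy as np

a = np.zeros((5, 8, 8, 1))
b = a[:, :, :, 0]                   # (5, 8, 8): channel axis dropped
b = np.reshape(b, b.shape + (1,))   # (5, 8, 8, 1): channel axis restored
print(b.shape)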
Example #7
    def getAction(self, state):
        """
          Compute the action to take in the current state.  With
          probability self.epsilon, we should take a random action and
          take the best policy action otherwise.  Note that if there are
          no legal actions, which is the case at the terminal state, you
          should choose None as the action.

          HINT: You might want to use util.flipCoin(prob)
          HINT: To pick randomly from a list, use random.choice(list)
        """
        # Pick Action
        legalActions = self.getLegalActions(state)
        # if 'Stop' in legalActions:
        #     legalActions.remove('Stop')

        action = None
        "*** YOUR CODE HERE ***"
        if not legalActions:
            return action  # Terminal State, return None

        self.image = getFrame()
        self.image = np.array(self.image)
        self.image = resize(self.image, (self.frame_width, self.frame_height))
        self.image = np.reshape(self.image,
                                [1, self.frame_height, self.frame_width, 3])
        self.image = np.uint8(self.image)

        #print 'Epsilon value: ', self.epsilon
        if self.epsilon > random.random():
            action = random.choice(legalActions)  # Explore
        else:
            #action = self.computeActionFromQValues(state)  # Exploit
            #state_matrix = self.getStateMatrices(state)
            #state_matrix = np.reshape(np.array(state_matrix), [1, self.state_size])
            #state_matrix = np.reshape(state_matrix, (1, self.frame_width, self.frame_height))

            act_values = self.model.predict(self.image)
            action = PACMAN_ACTIONS[(np.argmax(
                act_values[0]))]  # returns action

            if action not in legalActions:
                action = 'Stop'
                #action = random.choice(legalActions)

        self.doAction(state, action)
        return action
    def normalize_and_reshape(self, x):
        x = x.astype('float32') / 255.
        x = np.reshape(x, newshape=INPUT_SHAPE)  # CNN needs depth.
        return x
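INPUT_SHAPE is defined elsewhere in the original module; as the comment says, the reshape only adds the channel ("depth") axis a CNN expects. A minimal sketch assuming a hypothetical grayscale INPUT_SHAPE of (-1, 28, 28, 1):

import numpy as np

x = np.random.randint(0, 256, size=(10, 28, 28)).astype('float32') / 255.
x = np.reshape(x, newshape=(-1, 28, 28, 1))  # hypothetical INPUT_SHAPE
print(x.shape)                               # (10, 28, 28, 1)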
Example #9
from keras import Input, Model
from keras.layers import Dense, LSTM
import numpy as np

deep_lstm = Input(shape=(None, 2))
dl = Dense(4)(deep_lstm)
dl = LSTM(1)(dl)

model = Model(inputs=deep_lstm, outputs=dl)
model.compile(optimizer='rmsprop', loss="mse")

x = np.array([[[1, 2], [3, 4]], [[4, 3], [2, 1]]])
y = np.array([1, -1])

model.fit(x, y, epochs=100)

print(model.predict(np.reshape(x[0], (-1, 2, 2))))
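The reshape in the final line turns the single (2, 2) sample x[0] back into a batch of one, matching the (batch, timesteps, features) input the model was built for. A minimal sketch of that shape change:

import numpy as np

sample = np.array([[1, 2], [3, 4]])     # (timesteps, features) = (2, 2)
batch = np.reshape(sample, (-1, 2, 2))  # (1, 2, 2): a batch containing one sample
print(batch.shape)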
Example #10
from keras.datasets import mnist
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
import numpy as np
from keras.models import Model
from keras import backend as K

temp_input_array = np.load('../output/output.npy')
temp_input_array = temp_input_array.reshape((temp_input_array.shape[0], 28, 28, 3))

test_input_array = np.load('../output/output_test.npy')
test_input_array = test_input_array.reshape((test_input_array.shape[0], 28, 28, 3))

input_array = temp_input_array[:, :, :, 0]
input_array = np.append(input_array, temp_input_array[:, :, :, 1], axis=0)
input_array = np.append(input_array, temp_input_array[:, :, :, 2], axis=0)
input_array = np.reshape(input_array, (input_array.shape[0],
                                       input_array.shape[1],
                                       input_array.shape[2],
                                       1))

test_input_array = test_input_array[:, :, :, 0]
test_input_array = np.reshape(test_input_array, (test_input_array.shape[0],
                                                 test_input_array.shape[1],
                                                 test_input_array.shape[2],
                                                 1))

(x_train, _), (x_test, _) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format
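Above, the three colour channels of temp_input_array are split and stacked along the batch axis, and np.reshape restores a singleton channel axis so the data looks like a larger set of single-channel images. A minimal sketch of that pattern with a small made-up array:

import numpy as np

rgb = np.random.rand(5, 28, 28, 3)               # 5 RGB images
mono = rgb[:, :, :, 0]                           # channel 0 -> (5, 28, 28)
mono = np.append(mono, rgb[:, :, :, 1], axis=0)  # + channel 1 -> (10, 28, 28)
mono = np.append(mono, rgb[:, :, :, 2], axis=0)  # + channel 2 -> (15, 28, 28)
mono = np.reshape(mono, mono.shape + (1,))       # (15, 28, 28, 1)
print(mono.shape)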
    def custom_loss(self, y_true, y_pred):
        mask_shape = tf.shape(y_true)[:4]

        cell_x = tf.to_float(
            tf.reshape(tf.tile(tf.range(self.config['model']['grid_w']), [self.config['model']['grid_h']]),
                       (1, self.config['model']['grid_h'], self.config['model']['grid_w'], 1, 1)))
        cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))

        cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [self.config['train']['batch_size'], 1, 1, self.nb_box, 1])

        coord_mask = tf.zeros(mask_shape)
        conf_mask = tf.zeros(mask_shape)
        class_mask = tf.zeros(mask_shape)

        seen = tf.Variable(0.)
        total_loss = tf.Variable(0.)
        total_recall = tf.Variable(0.)
        total_boxes = tf.Variable(self.config['model']['grid_h'] * self.config['model']['grid_w'] *
                                  self.config['model']['num_boxes'] * self.config['train']['batch_size'])
        """
        Adjust prediction
        """
        ### adjust x and y
        pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid

        ### adjust w and h: exponentiate and scale by the anchors, reshaped for broadcasting
        pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(self.config['model']['anchors'], [1, 1, 1, self.nb_box, 2])

        ### adjust confidence
        pred_box_conf = tf.sigmoid(y_pred[..., 4])

        ### adjust class probabilities
        pred_box_class = y_pred[..., 5:]

        """
        Adjust ground truth
        """
        ### adjust x and y
        true_box_xy = y_true[..., 0:2]  # relative position to the containing cell

        ### adjust w and h
        true_box_wh = y_true[..., 2:4]  # number of cells across, horizontally and vertically

        ### adjust confidence
        true_wh_half = true_box_wh / 2.
        true_mins = true_box_xy - true_wh_half
        true_maxes = true_box_xy + true_wh_half

        pred_wh_half = pred_box_wh / 2.
        pred_mins = pred_box_xy - pred_wh_half
        pred_maxes = pred_box_xy + pred_wh_half

        intersect_mins = tf.maximum(pred_mins, true_mins)
        intersect_maxes = tf.minimum(pred_maxes, true_maxes)
        intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

        true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]
        pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]

        union_areas = pred_areas + true_areas - intersect_areas
        iou_scores = tf.truediv(intersect_areas, union_areas)

        true_box_conf = iou_scores * y_true[..., 4]

        ### adjust class probabilities
        true_box_class = tf.argmax(y_true[..., 5:], -1)

        """
        Determine the masks
        """
        ### coordinate mask: simply the position of the ground truth boxes (the predictors)
        coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * self.config['model']['coord_scale']

        ### confidence mask: penalize predictors + penalize boxes with low IOU
        # penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6
        true_xy = self.true_boxes[..., 0:2]
        true_wh = self.true_boxes[..., 2:4]

        true_wh_half = true_wh / 2.
        true_mins = true_xy - true_wh_half
        true_maxes = true_xy + true_wh_half

        pred_xy = tf.expand_dims(pred_box_xy, 4)
        pred_wh = tf.expand_dims(pred_box_wh, 4)

        pred_wh_half = pred_wh / 2.
        pred_mins = pred_xy - pred_wh_half
        pred_maxes = pred_xy + pred_wh_half

        intersect_mins = tf.maximum(pred_mins, true_mins)
        intersect_maxes = tf.minimum(pred_maxes, true_maxes)
        intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

        true_areas = true_wh[..., 0] * true_wh[..., 1]
        pred_areas = pred_wh[..., 0] * pred_wh[..., 1]

        union_areas = pred_areas + true_areas - intersect_areas
        iou_scores = tf.truediv(intersect_areas, union_areas)

        best_ious = tf.reduce_max(iou_scores, axis=4)
        #conf_mask = conf_mask + tf.to_float(best_ious < 0.5) * (1 - y_true[..., 4]) * self.no_object_scale

        # penalize the confidence of the boxes, which are responsible for the corresponding ground truth box
        #conf_mask = conf_mask + y_true[..., 4] * self.object_scale

        conf_mask_neg = tf.to_float(best_ious < 0.4) * (1 - y_true[..., 4]) * self.config['model']['no_obj_scale']
        conf_mask_pos = y_true[..., 4] * self.config['model']['obj_scale']

        ### class mask: simply the position of the ground truth boxes (the predictors)
        class_mask = y_true[..., 4] * tf.gather(self.class_wt, true_box_class) * self.config['model']['class_scale']

        """
        Warm-up training
        """
        no_boxes_mask = tf.to_float(coord_mask < self.config['model']['coord_scale'] / 2.)
        seen = tf.assign_add(seen, 1.)

        true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, self.config['train']['warmup_batches'] + 1),
                                                       lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,
                                                                true_box_wh + tf.ones_like(true_box_wh) * \
                                                                np.reshape(self.config['model']['anchors'],
                                                                [1, 1, 1, self.nb_box, 2]) * no_boxes_mask,
                                                                tf.ones_like(coord_mask)],
                                                       lambda: [true_box_xy,
                                                                true_box_wh,
                                                                coord_mask])

        """
        Finalize the loss
        """
        nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))
        #nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))
        nb_conf_box_neg = tf.reduce_sum(tf.to_float(conf_mask_neg > 0.0))
        nb_conf_box_pos = tf.subtract(tf.to_float(total_boxes), nb_conf_box_neg) #tf.reduce_sum(tf.to_float(conf_mask_pos > 0.0))
        nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))

        true_box_wh = tf.sqrt(true_box_wh)
        pred_box_wh = tf.sqrt(pred_box_wh)

        loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.
        loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.
        loss_conf_neg = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask_neg) / (nb_conf_box_neg + 1e-6) / 2.
        loss_conf_pos = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask_pos) / (nb_conf_box_pos + 1e-6) / 2.
        loss_conf = loss_conf_neg + loss_conf_pos

        loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)
        loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)

        loss = tf.cond(tf.less(seen, self.config['train']['warmup_batches'] + 1),
                       lambda: loss_xy + loss_wh + loss_conf + loss_class + 10,
                       lambda: loss_xy + loss_wh + loss_conf + loss_class)

        if self.config['train']['debug']:
            nb_true_box = tf.reduce_sum(y_true[..., 4])
            nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.3) * tf.to_float(pred_box_conf > 0.25))

            current_recall = nb_pred_box / (nb_true_box + 1e-6)
            total_recall = tf.assign_add(total_recall, current_recall)

            total_loss = tf.assign_add(total_loss, loss)

            #loss = tf.Print(loss, [m2], message='\nPred box conf \t', summarize=1000)
            loss = tf.Print(loss, [loss_xy], message='\nLoss XY \t', summarize=1000)
            loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000)
            loss = tf.Print(loss, [nb_conf_box_neg], message='Nb Conf Box Negative \t', summarize=1000)
            loss = tf.Print(loss, [nb_conf_box_pos], message='Nb Conf Box Positive \t', summarize=1000)
            loss = tf.Print(loss, [loss_conf_neg], message='Loss Conf Negative \t', summarize=1000)
            loss = tf.Print(loss, [loss_conf_pos], message='Loss Conf Positive \t', summarize=1000)
            loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000)
            loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000)
            loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000)
            loss = tf.Print(loss, [total_loss / seen], message='Average Loss \t', summarize=1000)
            #loss = tf.Print(loss, [y_true[..., 5:]], message='\nYtrue \t', summarize=1000)
            #loss = tf.Print(loss, [true_box_class], message='True box class \t', summarize=1000)
            #loss = tf.Print(loss, [pred_box_class], message=' Pred box class \t', summarize=1000)
            loss = tf.Print(loss, [nb_pred_box], message='Number of pred boxes \t', summarize=1000)
            loss = tf.Print(loss, [nb_true_box], message='Number of true boxes \t', summarize=1000)
            loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000)
            loss = tf.Print(loss, [total_recall / seen], message='Average Recall \t', summarize=1000)

        return loss
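The np.reshape calls in this loss turn the flat anchors list into a [1, 1, 1, nb_box, 2] array so it broadcasts against the (batch, grid_h, grid_w, nb_box, 2) width/height tensors. A minimal NumPy sketch with hypothetical anchors and grid sizes:

import numpy as np

anchors = [1.0, 2.0, 3.0, 4.0]                  # 2 anchor boxes as (w, h) pairs
nb_box = len(anchors) // 2
pred_wh = np.random.rand(8, 13, 13, nb_box, 2)  # batch, grid_h, grid_w, boxes, (w, h)
scaled = pred_wh * np.reshape(anchors, [1, 1, 1, nb_box, 2])
print(scaled.shape)                             # (8, 13, 13, 2, 2)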