Example #1
def getphase(fname, driveN, x, Fs):
    # Pad the signal, cross-correlate it with the drive waveform, and return
    # the index of the correlation peak (fdrive is assumed to be a module-level
    # drive frequency defined elsewhere in the source file).
    xdat = np.append(x, np.zeros(int(Fs / fdrive)))
    corr2 = np.correlate(xdat, driveN)
    maxv = np.argmax(corr2)

    return maxv
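The function above pads the signal, cross-correlates it with the drive waveform, and uses np.argmax to locate the correlation peak. A minimal self-contained sketch of the same idea, with a made-up sample rate and delay (illustrative only, not taken from the original code):

import numpy as np

# np.argmax over a full cross-correlation recovers the lag at which one signal
# best aligns with another.
fs = 100                                   # sample rate (samples per second)
t = np.arange(0, 1, 1 / fs)
drive = np.sin(2 * np.pi * 5 * t)          # reference waveform, 5 Hz
delayed = np.roll(drive, 7)                # the same waveform delayed by 7 samples
corr = np.correlate(delayed, drive, mode="full")
lag = np.argmax(corr) - (len(drive) - 1)   # peak index relative to zero lag
print(lag)                                 # -> 7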
Example #2
    def log_ushiriki(self, eval_policy):
        """
            Evaluate on Unshiriki env
        """
        self.best_rews = []
        candidates = []
        for step in range(self.params['eval_ep_lens']):
            ob = self.env.reset()
            policy = {}

            for i in range(self.env.policyDimension):
                policy[step + i] = eval_policy.get_action(ob)[0]
                candidates.append(policy)
            rew = self.env.evaluatePolicy(candidates)
            best_idx = np.argmax(rew)
            best_policy = candidates[best_idx]
            best_rew = rew[best_idx]
        self.best_rews.append(best_rew)
    def ImageProcess(self,
                     image,
                     showLabels=True,
                     showBoundingBox=True,
                     showFPS=False):

        # Initialize YOLO if needed
        if self.net is None:
            self.InitYoloV3()

        (H, W) = image.shape[:2]

        frame = image.copy()
        blob = cv2.dnn.blobFromImage(frame,
                                     1 / 255.0, (416, 416),
                                     swapRB=True,
                                     crop=False)
        self.net.setInput(blob)
        if showFPS:
            starttime = time.time()
        layerOutputs = self.net.forward(self.ln)
        if showFPS:
            stoptime = time.time()
            print("FPS: {:.4f}".format(stoptime - starttime))
        confidences = []
        outline = []
        class_ids = []

        for output in layerOutputs:
            for detection in output:
                scores = detection[5:]
                maxi_class = np.argmax(scores)
                confidence = scores[maxi_class]
                if confidence > self.confidence:
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    outline.append([x, y, int(width), int(height)])
                    class_ids.append(maxi_class)
                    confidences.append(float(confidence))

        box_line = cv2.dnn.NMSBoxes(outline, confidences, 0.5, 0.3)

        if len(box_line) > 0:
            flat_box = box_line.flatten()
            pairs = []
            for i in flat_box:
                (x, y) = (outline[i][0], outline[i][1])
                (w, h) = (outline[i][2], outline[i][3])

                x_plus_w = round(x + w)
                y_plus_h = round(y + h)

                label = str(self.LABELS[class_ids[i]])
                color = self.COLORS[class_ids[i]]

                if showBoundingBox:
                    cv2.rectangle(frame, (x, y), (x_plus_w, y_plus_h), color,
                                  2)

                if showLabels:
                    cv2.putText(frame, label, (x - 10, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)

        return frame
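In the detection loop above, np.argmax picks the best-scoring class for each detection row. A minimal illustrative sketch with a made-up detection vector (YOLO-style layout: 4 box values, objectness, then per-class scores):

import numpy as np

detection = np.array([0.5, 0.5, 0.2, 0.3, 0.9, 0.05, 0.80, 0.15])
scores = detection[5:]           # class scores follow the box and objectness values
class_id = np.argmax(scores)     # index of the best-scoring class -> 1
confidence = scores[class_id]    # -> 0.8
print(class_id, confidence)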
def train_neural_network(input_image):
    predict_action = convolutional_neural_network(input_image)

    # Placeholders for the one-hot action mask and the target value
    argmax = tf.placeholder("float", [None, output])
    gt = tf.placeholder("float", [None])

    action = tf.reduce_sum(tf.multiply(predict_action, argmax), reduction_indices=1)
    cost = tf.reduce_mean(tf.square(action - gt))
    optimizer = tf.train.AdadeltaOptimizer(1e-6).minimize(cost)

    game = Game()
    D = deque()  # replay memory

    # Grab an initial frame, downscale and binarize it, and stack it four times
    # to form the first network input.
    _, image = game.run(MOVE_STAY)
    image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)
    ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
    input_image_data = np.stack((image, image, image, image), axis=2)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        n = 0
        epsilon = INIT_ESPTION

        while True:
            action_t = predict_action.eval(feed_dict={input_image: [input_image_data]})[0]
            argmax_t = np.zeros([output], dtype=int)

            # Epsilon-greedy action selection: explore with probability epsilon,
            # otherwise pick the action with the highest predicted value.
            if random.random() <= epsilon:
                maxIndex = random.randrange(output)
            else:
                maxIndex = np.argmax(action_t)
            argmax_t[maxIndex] = 1

            # Anneal epsilon towards its final value
            if epsilon > FINAL_ESPTION:
                epsilon -= (INIT_ESPTION - FINAL_ESPTION) / EXPLORE

            reward, image = game.run(list(argmax_t))
            image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)
            ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
            image = np.reshape(image, (80, 100, 1))
            input_image_data1 = np.append(image, input_image_data[:, :, 0:3], axis=2)

            # Store the transition in the replay memory
            D.append((input_image_data, argmax_t, reward, input_image_data1))
            if len(D) > REPLAY_MEMORY:
                D.popleft()

            if n > OBSERVE:
                minibatch = random.sample(D, BATCH)
                input_image_data_batch = [d[0] for d in minibatch]
                argmax_batch = [d[1] for d in minibatch]
                reward_batch = [d[2] for d in minibatch]
                input_image_data1_batch = [d[3] for d in minibatch]

                gt_batch = []
                out_batch = predict_action.eval(feed_dict={input_image: input_image_data1_batch})
                for i in range(0, len(minibatch)):
                    gt_batch.append(reward_batch[i] + LEARN_RATE * np.max(out_batch[i]))

                optimizer.run(feed_dict={gt: gt_batch, argmax: argmax_batch, input_image: input_image_data_batch})

            input_image_data = input_image_data1
            n = n + 1

            if n % 10000 == 0:
                # saver.save(sess, 'C:\\Users\\hasee\\game.cpk', global_step=n)
                print(n, "epsilon:", epsilon, "action:", maxIndex, "reward:", reward)
Example #5
    def get_decision_indices(self, points):
        (N, d) = points.shape
        F = self.fn.evaluate(points)
        (n, A) = F.shape
        assert N == n
        return np.argmax(F, axis=1)
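A quick illustration of the axis=1 behaviour used above: np.argmax returns one index per row, i.e. the column of the largest entry in each row (made-up values):

import numpy as np

F = np.array([[0.1, 0.9, 0.0],
              [0.8, 0.1, 0.1]])
print(np.argmax(F, axis=1))  # -> [1 0], the winning column for each row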
Example #6
    def inferObjectWithRandomMovements(
            self,  # noqa: C901, N802
            objectDescription,  # noqa: N803
            objectImage,  # noqa: N803
            all_class_targets,
            cellsPerColumn,  # noqa: N803
            trial_iter,
            fixed_touch_sequence=None,
            numSensations=None,
            randomLocation=False):
        """
        Attempt to recognize the specified object with the network. Moves
        the sensor over the object until the object is recognized.

        @param objectDescription (dict)
        For example:
        {"name": "Object 1",
         "features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"},
                      {"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]}

        @param objectImage (Numpy array)
        The current object's image

        @param all_class_targets (list)
        Target representations for each class

        @param cellsPerColumn (int)

        @param trial_iter (int)

        @param numSensations (int or None)
        Set this to run the network for a fixed number of sensations. Otherwise this
        method will run until the object is recognized or until maxTraversals is
        reached.

        @return inferredStep (int or None), incorrect (dict), prediction_sequence (list),
        touchSequence (list)
        """
        self.reset()

        for monitor in self.monitors.values():
            monitor.beforeInferObject(objectDescription)

        currentStep = 0  # noqa: N806
        finished = False
        inferred = False
        inferredStep = None  # noqa: N806
        prevTouchSequence = None  # noqa: N806
        incorrect = {
            "never_converged": 1,
            "false_convergence": 0
        }  # Track whether the non-recognition was due to convergence to an
        # incorrect representation or to never converging

        for _ in xrange(self.maxTraversals):  # noqa: F821
            # Choose touch sequence.
            while True:
                touchSequence = range(len(
                    objectDescription["features"]))  # noqa: N806
                if fixed_touch_sequence is None:
                    random.shuffle(touchSequence)
                    print("\nPerforming inference using an arbitrary, unfixed"
                          "sequense of touches:")
                    print(touchSequence)

                else:
                    print("\nPerforming inference using a fixed random"
                          "sequense of touches:")
                    touchSequence = fixed_touch_sequence  # noqa: N806
                    print(touchSequence)

                # Make sure the first touch will cause a movement.
                if (prevTouchSequence is not None
                        and touchSequence[0] == prevTouchSequence[-1]):
                    continue

                break

            # sense_sequence contains a list of all the previous input SDRs;
            # prediction_sequence contains the current SDR prediction, as well as
            # previously sensed input SDRs up until inference is successful
            sense_sequence = []
            prediction_sequence = []

            for i_feature in touchSequence:
                currentStep += 1
                feature = objectDescription["features"][i_feature]

                self._move(feature, randomLocation=randomLocation)

                # Save the location representation for later
                pre_touch_location_list = self.column.get_location_copy()

                featureSDR = self.features[feature["name"]]  # noqa: N806

                self._sense(featureSDR, learn=False, waitForSettle=False)

                predictedColumns = map(
                    int,
                    list(
                        set(
                            np.floor(  # noqa: N806
                                self.column.L4.getBasalPredictedCells() /
                                cellsPerColumn))))
                # Note _sense of the feature itself does not change the predicted
                # columns on this touch iteration (and thus does not invalidate the
                # prediction), but it does ensure the BasalPredictedCells have been
                # updated following the movement, and we re-set the location
                # representation later once in post-inference

                # Include all previously sensed/predicted representations by
                # over-writing current_sequence
                current_sequence = sense_sequence[:]

                current_sequence.append(
                    list(predictedColumns))  # include the newly
                # predicted columns

                if currentStep == 1:  # On the first step, record the input sensation
                    prediction_sequence.append([featureSDR[:]])

                else:
                    prediction_sequence.append(current_sequence)

                if not inferred:
                    sense_sequence.append(featureSDR[:])

                else:
                    # Re-set location representations after inference successful so
                    # that additional sensations don't influence predictions, and we
                    # can use the output predictions to visualize what the network sees
                    # across the rest of the input space
                    module_iter = 0
                    for module in self.column.L6aModules:
                        module.activeCells = pre_touch_location_list[
                            module_iter]
                        module_iter += 1

                    # Once inference has taken place, sense_sequence gathers
                    # predictions
                    sense_sequence.append(list(predictedColumns))

                if not inferred:
                    # Use the sensory-activated cells to detect whether the object has
                    # been recognized. If these sensory-activated cells
                    # are correct, it implies that the input layer's representation is
                    # classifiable -- the location layer just correctly classified it.

                    representation = \
                        self.column.getSensoryAssociatedLocationRepresentation()

                    classification_list = []

                    if len(set(representation)) > 0:

                        # Check the representation against all possible target classes
                        for target_iter in range(10):
                            target_representations = \
                                all_class_targets[target_iter][i_feature]

                            if (set(representation) <= target_representations):
                                classification_list.append(target_iter)
                                print("Classified as a " + str(target_iter) +
                                      " with ground truth of " +
                                      objectDescription["name"])

                    if len(classification_list) > 1:
                        print(
                            "Classification list : classified as multiple classes!"
                        )
                        print(classification_list)

                        print(
                            "***Provisional code to handle multiple classes"
                            "implemented, but as never experienced, the reliability"
                            "of the intersection resolution is untested***")
                        exit()

                        intersection_list = []

                        # Check the amount of overlap between the representation
                        # and each of the ambiguous candidate classes
                        for ambiguous_class_iter in range(
                                len(classification_list)):

                            intersection_list.append(
                                len(
                                    set(representation).intersection(
                                        all_class_targets[classification_list[
                                            ambiguous_class_iter]]
                                        [i_feature])))

                        # *** note this does not deal with Numpy's behaviour if two
                        # locations have the same maximum value -->
                        # see https://numpy.org/doc/stable/reference/
                        # generated/numpy.argmax.html
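                        # (For example, np.argmax([3, 5, 5]) returns 1: ties resolve
                        # to the first maximal index, so an ambiguous overlap would
                        # silently favour the earliest candidate class in the list.)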

                        print("Intersection list:")
                        print(intersection_list)
                        classification_list = classification_list[np.argmax(
                            intersection_list)]

                        print("After checking intersections, resolved to:")
                        print(classification_list)

                    inferred = (len(classification_list) == 1)

                    if inferred:
                        if classification_list[0] == int(
                                objectDescription["name"][0]):
                            print("Correctly classified")
                            inferredStep = currentStep  # noqa: N806
                            plt.imsave(
                                "correctly_classified/trial_" +
                                str(trial_iter) + "_" +
                                objectDescription["name"] + ".png",
                                objectImage)
                            incorrect = {
                                "never_converged": 0,
                                "false_convergence": 0
                            }

                        else:
                            print("Incorrectly classified")
                            incorrect = {
                                "never_converged": 0,
                                "false_convergence": 1
                            }
                            plt.imsave(
                                "misclassified/trial_" + str(trial_iter) +
                                "_example_" + objectDescription["name"] +
                                "_converged_to_" +
                                str(classification_list[0]) + ".png",
                                objectImage)
                            return None, incorrect, prediction_sequence, touchSequence

                finished = ((((inferred and numSensations is None) or
                              (numSensations is not None
                               and currentStep == numSensations)))
                            and currentStep == 25)
                # Continuing to step 25 ensures we gather network predictions even after
                # inference is successful

                if finished:
                    break

            prevTouchSequence = touchSequence  # noqa: N806

            if finished:
                break

        if incorrect["never_converged"] == 1:
            print("\nNever converged!")
            print("Inferred step when never converged " + str(inferredStep))

        return inferredStep, incorrect, prediction_sequence, touchSequence
Example #7
    def predict(self, X):
        yp = self.model.predict(X)
        return np.argmax(yp)