Example No. 1
def opp_sliding_window(data_x, data_y, ws, ss):
    a = data_x.shape[1]  #data_x.shape[0] = 557963  data_x.shape[1] = 113
    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    data_y_temp = sliding_window(data_y, ws, ss)  # (46495,24,113)
    data_y = np.asarray([[i[-1]] for i in data_y_temp])  # (46495,1)
    return data_x.astype(np.float32), data_y.reshape(len(data_y)).astype(
        np.uint8)
    def opp_sliding_window(self, data_x, data_y):
        ws = self.config['sliding_window_length']
        ss = self.config['sliding_window_step']

        logging.info(
            '        Network_User: Sliding window with ws {} and ss {}'.format(
                ws, ss))

        # Segmenting the data with labels taken from the end of the window
        data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
        if self.config['label_pos'] == 'end':
            data_y_labels = np.asarray(
                [[i[-1]] for i in sliding_window(data_y, ws, ss)])
        elif self.config['label_pos'] == 'middle':
            # Segmenting the data with labels from the middle of the window
            data_y_labels = np.asarray(
                [[i[i.shape[0] // 2]] for i in sliding_window(data_y, ws, ss)])
        elif self.config['label_pos'] == 'mode':
            data_y_labels = []
            for sw in sliding_window(data_y, ws, ss):
                count_l = np.bincount(sw, minlength=self.num_classes)
                idy = np.argmax(count_l)
                data_y_labels.append(idy)
            data_y_labels = np.asarray(data_y_labels)

        # Labels of each sample per window
        data_y_all = np.asarray([i[:] for i in sliding_window(data_y, ws, ss)])

        logging.info('        Network_User: Sequences are segmented')

        return data_x.astype(np.float32), data_y_labels.reshape(
            len(data_y_labels)).astype(np.uint8), data_y_all.astype(np.uint8)
def opp_sliding_window(data_x, data_y, ws, ss):
    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    data_y = np.asarray([[i[-1]] for i in sliding_window(data_y, ws, ss)])
    data_x, data_y = data_x.astype(np.float32), one_hot(
        data_y.reshape(len(data_y)).astype(np.uint8))
    print(" ..after sliding window (testing): inputs {0}, targets {1}".format(
        X_test.shape, y_test.shape))
    return data_x, data_y
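All of the snippets on this page call a sliding_window helper that the page itself does not show. For reference, here is a minimal NumPy sketch of such a helper, written from how the snippets call it (the original projects may use a different implementation); ws and ss may be an int for 1-D input or a per-axis tuple such as (ws, data_x.shape[1]) and (ss, 1).

import numpy as np

def sliding_window(a, ws, ss=None):
    # Minimal sketch, not the implementation used by the projects above.
    if ss is None:
        ss = ws
    ws = np.atleast_1d(ws)
    ss = np.atleast_1d(ss)
    # number of windows along each axis, followed by the window shape itself
    shape = tuple((np.array(a.shape) - ws) // ss + 1) + tuple(ws)
    strides = tuple(np.array(a.strides) * ss) + a.strides
    windows = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
    # collapse the per-axis window counts into a single leading window index
    return windows.reshape((-1,) + tuple(ws))

With this sketch, sliding_window(data_y, ws, ss) yields an array of shape (n_windows, ws) and sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1)) yields (n_windows, ws, n_channels), which is what the label-extraction comprehensions above expect.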
Example No. 4
def opp_sliding_window(data_x, data_y, ws, ss):
    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    data_y = np.asarray([[i[-1]] for i in sliding_window(data_y, ws, ss)])

    data_x, data_y = data_x.astype(np.float32), one_hot(
        data_y.reshape(len(data_y)).astype(np.uint8))
    #print(data_y)
    return data_x, data_y
def opp_sliding_window(data_x, data_y, ws, ss, label_pos_end = True):
    '''
    Performs the sliding window approach on the data and the labels

    Returns three arrays:
    - data, an array where the first dim indexes the windows
    - labels per window, taken from the end, the middle or the mode of the window
    - all labels per window

    @param data_x: sensor data
    @param data_y: labels
    @param ws: window size (number of samples per window)
    @param ss: window step (stride between consecutive windows)
    @param label_pos_end: if True, label each window with its last sample;
                          otherwise use the middle sample or the per-window mode
    '''


    print("Sliding window: Creating windows {} with step {}".format(ws, ss))
    
    data_x = sliding_window(data_x,(ws,data_x.shape[1]),(ss,1))
    
    # Label from the end
    if label_pos_end:
        data_y_labels = np.asarray([[i[-1]] for i in sliding_window(data_y, (ws, data_y.shape[1]), (ss, 1))])
    else:
    
        #Label from the middle
        if False:
            data_y_labels = np.asarray([[i[i.shape[0] // 2]] for i in sliding_window(data_y,(ws,data_y.shape[1]),(ss,1))])
        else:
            count_l=[]
            idy = []
            #Label according to mode
            try:
                
                data_y_labels = []
                for sw in sliding_window(data_y,(ws,data_y.shape[1]),(ss,1)):
                    labels = np.zeros((20)).astype(int)
                    count_l = np.bincount(sw[:,0], minlength = NUM_CLASSES)
                    idy = np.argmax(count_l)
                    attrs = np.sum(sw[:,1:], axis = 0)
                    attrs[attrs > 0] = 1
                    labels[0] = idy  
                    labels[1:] = attrs
                    data_y_labels.append(labels)
                #print(len(data_y_labels))
                data_y_labels = np.asarray(data_y_labels)
                
            
            except Exception:
                print("Sliding window: error with the counting {}".format(count_l))
                print("Sliding window: error with the counting {}".format(idy))
                return np.inf
            
    # All labels per window
    data_y_all = np.asarray([i[:] for i in sliding_window(data_y, (ws, data_y.shape[1]), (ss, 1))])
    
    return data_x.astype(np.float32), data_y_labels.astype(np.uint8), data_y_all.astype(np.uint8)
def opp_sliding_window(data_x, data_y, ws, ss):
    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    data_y = np.asarray([[i[-1]] for i in sliding_window(data_y, ws, ss)])
    #np.savetxt('answers.txt',data_y, fmt='%s',delimiter=' ',newline='\n')
    print(data_y)
    data_x, data_y = data_x.astype(np.float32), one_hot(
        data_y.reshape(len(data_y)).astype(np.uint8))
    print(" ..after sliding window (testing): inputs {0}, targets {1}".format(
        X_test.shape, y_test.shape))
    return data_x, data_y
Example No. 7
def opp_sliding_window(data_x, data_y, ws, ss):
    """
    Obtaining the windowed data from the HAR data
    :param data_x: sensory data
    :param data_y: labels
    :param ws: window size
    :param ss: stride
    :return: windowed sensory data and the per-window labels (the last sample of each window)
    """
    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    data_y = np.reshape(data_y, (len(data_y), ))  # Just making it a vector if it was a 2D matrix
    data_y = np.asarray([[i[-1]] for i in sliding_window(data_y, ws, ss)])
    return data_x.astype(np.float32), data_y.reshape(len(data_y)).astype(np.uint8)
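A hypothetical call to this variant (array sizes, number of classes, window size and stride chosen only for illustration):

import numpy as np

# hypothetical inputs: 1000 samples, 113 sensor channels, integer class labels
data_x = np.random.randn(1000, 113)
data_y = np.random.randint(0, 18, size=(1000, 1))
X_win, y_win = opp_sliding_window(data_x, data_y, ws=24, ss=12)
print(X_win.shape, y_win.shape)  # (n_windows, 24, 113) and (n_windows,)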
Example No. 8
def build_windows(filepath, config):
  print(f"generating windows from {filepath}")
  data = np.load(filepath)
  data_x, data_y = split_data(data)
  data_y = data_y.astype(int)

  data_x_windows = sliding_window(data_x, (config["WINDOW_SIZE"], data_x.shape[1]), (config["STEP_SIZE"], 1))

  data_y_windows = sliding_window(data_y, config["WINDOW_SIZE"], config["STEP_SIZE"])
  data_y_windows = [np.argmax(np.bincount(window, minlength=config["NUM_CLASSES"])) for window in data_y_windows]
  data_y_windows = np.array(data_y_windows)

  return data_x_windows, data_y_windows
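build_windows assumes a split_data helper and a config dict with WINDOW_SIZE, STEP_SIZE and NUM_CLASSES keys; a hypothetical call (the file path and the values are placeholders):

# hypothetical usage; the .npy path and the config values are placeholders
config = {"WINDOW_SIZE": 24, "STEP_SIZE": 12, "NUM_CLASSES": 18}
X_windows, y_windows = build_windows("subject1.npy", config)
print(X_windows.shape, y_windows.shape)  # (n_windows, 24, n_channels) and (n_windows,)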
Example No. 9
def sliding_window_dnds(sequence_1: str, sequence_2: str,
                        window_size: int) -> Tuple[List[int], List[float]]:
    """
    Performs a sliding-window dN/dS analysis over the provided sequences.
    Returns a tuple of two parallel lists: window start positions and dN/dS ratios.
    """
    windows = zip(
        sliding_window(sequence_1, n=window_size),
        sliding_window(sequence_2, n=window_size),
    )
    return (
        [i for i in range(len(sequence_1) - window_size + 1)],
        [dnds(*window_pair) for window_pair in windows],
    )
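sliding_window_dnds assumes a dnds helper and an iterator-style sliding_window over sequences; a hypothetical call (the sequences are shortened placeholders):

# hypothetical usage; real inputs would be aligned coding sequences
starts, ratios = sliding_window_dnds("ATGGCCATTGTA", "ATGGCTATTGTC", window_size=6)
for start, ratio in zip(starts, ratios):
    print(start, ratio)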
    def opp_sliding_window(self, data_x, data_y):
        '''
        Performs the sliding window approach on the data and the labels

        Returns three arrays:
        - data, an array where the first dim indexes the windows
        - labels per window according to end, middle or mode
        - all labels per window

        @param data_x: sensor data
        @param data_y: labels
        @return data_x: Sequence train inputs [windows, C, T]
        @return data_y_labels: Activity classes [windows, 1]
        @return data_y_all: Activity classes for samples [windows, 1, T]
        '''

        ws = self.config['sliding_window_length']
        ss = self.config['sliding_window_step']

        logging.info(
            '        Network_User: Sliding window with ws {} and ss {}'.format(
                ws, ss))

        # Segmenting the data with labels taken from the end of the window
        data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
        if self.config['label_pos'] == 'end':
            data_y_labels = np.asarray(
                [[i[-1]] for i in sliding_window(data_y, ws, ss)])
        elif self.config['label_pos'] == 'middle':
            # Segmenting the data with labels from the middle of the window
            data_y_labels = np.asarray(
                [[i[i.shape[0] // 2]] for i in sliding_window(data_y, ws, ss)])
        elif self.config['label_pos'] == 'mode':
            data_y_labels = []
            for sw in sliding_window(data_y, ws, ss):
                count_l = np.bincount(sw, minlength=self.config['num_classes'])
                idy = np.argmax(count_l)
                data_y_labels.append(idy)
            data_y_labels = np.asarray(data_y_labels)

        # Labels of each sample per window
        data_y_all = np.asarray([i[:] for i in sliding_window(data_y, ws, ss)])

        logging.info('        Network_User: Sequences are segmented')

        return data_x.astype(np.float32), \
               data_y_labels.reshape(len(data_y_labels)).astype(np.uint8), \
               data_y_all.astype(np.uint8)
Example No. 11
def opp_sliding_window(data_x, data_y, ws, ss, label_pos):
    '''
    Performs the sliding window approach on the data and the labels

    Returns three arrays:
    - data, an array where the first dim indexes the windows
    - labels per window according to end, middle or mode
    - all labels per window

    @param data_x: sensor data
    @param data_y: labels
    @param ws: window size (number of samples per window)
    @param ss: window step (stride between consecutive windows)
    @param label_pos: 'end', 'middle' or 'mode'
    '''

    print("Sliding window: Creating windows {} with step {}".format(ws, ss))

    data_x = sliding_window(data_x, (ws, data_x.shape[1]), (ss, 1))
    if label_pos == 'end':
        data_y_labels = np.asarray([[i[-1]]
                                    for i in sliding_window(data_y, ws, ss)])
    elif label_pos == 'middle':
        # Segmenting the data with labels from the middle of the window
        data_y_labels = np.asarray([[i[i.shape[0] // 2]]
                                    for i in sliding_window(data_y, ws, ss)])
    elif label_pos == 'mode':
        data_y_labels = []
        for sw in sliding_window(data_y, (ws, data_y.shape[1]), (ss, 1)):
            labels = np.zeros((20)).astype(int)
            count_l = np.bincount(sw[:, 0], minlength=NUM_CLASSES)
            idy = np.argmax(count_l)
            attrs = np.sum(sw[:, 1:], axis=0)
            attrs[attrs > 0] = 1
            labels[0] = idy
            labels[1:] = attrs
            data_y_labels.append(labels)

        print(len(data_y_labels))
        data_y_labels = np.asarray(data_y_labels)

    # Labels of each sample per window
    #All labels per window
    data_y_all = np.asarray(
        [i[:] for i in sliding_window(data_y, (ws, data_y.shape[1]), (ss, 1))])

    return data_x.astype(np.float32), data_y_labels.astype(
        np.uint8), data_y_all.astype(np.uint8)
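The 'mode' branch above labels a window with its most frequent class via np.bincount and np.argmax; as a standalone sketch (num_classes is assumed to be known by the caller):

import numpy as np

def majority_label(window_labels, num_classes):
    # window_labels: 1-D array of integer class ids for every sample in one window
    counts = np.bincount(window_labels, minlength=num_classes)
    return int(np.argmax(counts))  # ties resolve to the lowest class id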
Example No. 12
def processDirectory(classifier, inputFolder, outputFolder=None):
    logger = logging.getLogger("TestClassifier")
    acceptableExtensions = ('jpg', 'jpeg', 'png')
    try:
        shutil.rmtree(os.path.join(outputFolder, 'positive'))
        shutil.rmtree(os.path.join(outputFolder, 'negative'))
        os.makedirs(os.path.join(outputFolder, 'positive'))
        os.makedirs(os.path.join(outputFolder, 'negative'))
    except OSError:
        pass

    for filename in os.listdir(inputFolder):
        if filename.endswith(acceptableExtensions):
            logger.debug('Processing %s' % (filename,))
            image = Image(os.path.join(inputFolder, filename), windowSize=(30, 30), shiftSize=(3, 3))
            # _, windows = image.process()
            image.prepare()
            print(filename)
            s = ((np.array(image.image.shape) - np.array(image.windowSize)) // np.array(image.shiftSize)) + 1
            print(s)
            image.windowsAmountInfo = s
            windows = sliding_window(image = image.image, windowSize=(30, 30),shiftSize=(3, 3), flatten=True)
            l = windows.shape
            windows = windows.reshape(l[0],900)
            windows = np.insert(windows, 0, 0, axis=1 )

            result = classifier.predict(windows)
            print(np.sum(result))
Example No. 13
def compute_aggr_spec_seq(spec_mat, win_size, hop_size):
    shp2 = spec_mat.shape[1]
    mat_aggr = []
    for sub_mat in sliding_window(spec_mat, (win_size, shp2), (hop_size, shp2)):
        aggre_vec = sub_mat.mean(axis=0)
        mat_aggr.append(aggre_vec.tolist())
    return mat_aggr
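compute_aggr_spec_seq averages each (win_size x n_bins) block of a spectrogram along the time axis; a hypothetical call on a random matrix:

import numpy as np

# hypothetical usage with a random 100-frame, 40-bin spectrogram-like matrix
spec = np.random.rand(100, 40)
aggregated = compute_aggr_spec_seq(spec, win_size=10, hop_size=5)
print(len(aggregated), len(aggregated[0]))  # (number of windows, number of bins)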
Example No. 14
def find_text2(image):
    result = ''
    model = tensorflow.keras.models.load_model('test/modelo')
    image = imutils.resize(image, height=28)
    labels = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    for (x, y, window) in sliding_window(image,
                                         stepSize=5,
                                         windowSize=(winW, winH)):
        if window.shape[0] != winH or window.shape[1] != winW:
            continue
        data = get_grayscale_sector(window)
        predictions = model.predict(np.array([data]))[0]
        guess = np.argmax(predictions) if np.max(predictions) > 0.3 else None
        letter = labels[guess] if guess is not None else "-"

        clone = image.copy()
        cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
        cv2.imshow("Window", clone)
        cv2.waitKey(1)
        time.sleep(0.025)

        result = result + letter

    print(result)
    return result
Example No. 15
    def extractFeatures(self, positiveImageTemplate=None):
        windowSize, shiftSize, tagPosition = self.windowSize, self.shiftSize, self.tagPosition
        # if positiveImageTemplate is not None:
        #     imsave(positiveImageTemplate % (-1,), self.image)

        # count rows/columns amount
        s = ((np.array(self.image.shape) - np.array(windowSize)) // np.array(shiftSize)) + 1
        self.windowsAmountInfo = s
        windows = sliding_window(self.image, windowSize, shiftSize)
        self.positiveExamples = []
        self.negativeExamples = []
        j = 0
        for i, w in enumerate(windows):
            x, y = (i // s[1]) * shiftSize[0], (i % s[1]) * shiftSize[1]

            wSized = resize(w, self.finalWindowResolution)
            features = feature.hog(wSized)

            if (
                self.tagPosition
                and (x + windowSize[0] - tagPosition[0]) >= (windowSize[0] / 3)
                and (y + windowSize[1] - tagPosition[1]) >= (windowSize[1] / 3)
                and (tagPosition[2] - x) >= (windowSize[0] / 3)
                and (tagPosition[3] - y) >= (windowSize[1] / 3)
            ):

                if positiveImageTemplate is not None:
                    imsave(positiveImageTemplate % (j,), w)
                    j += 1
                self.positiveExamples.append(features)
            else:
                self.negativeExamples.append(features)
Example No. 16
def compute_aggr_spec_seq(spec_mat, win_size, hop_size):
    shp2 = spec_mat.shape[1]
    mat_aggr = []
    for sub_mat in sliding_window(spec_mat, (win_size, shp2),
                                  (hop_size, shp2)):
        aggre_vec = sub_mat.mean(axis=0)
        mat_aggr.append(aggre_vec.tolist())
    return mat_aggr
Example No. 17
def detect(img, scoreThreshold):

    order = 1

    scale = 1
    w, h = 20, 20
    rectangles = []

    # HOG feature parameters
    winSize = (20, 20)
    blockSize = (10, 10)
    blockStride = (5, 5)
    cellSize = (5, 5)
    nBin = 9
    hog = cv.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nBin)

    svm = cv.ml.SVM_load(svm_xml)
    # svm = cv.ml.SVM_load('../object-detection/svmhead.xml')

    # _, result = svm.predict(np.array([hist]))
    # a, score = svm.predict(np.array([hist]), flags=cv.ml.STAT_MODEL_RAW_OUTPUT | cv.ml.STAT_MODEL_UPDATE_MODEL)

    for resized in pyramid(img, scaleFactor):
        scale = float(img.shape[1] / float(resized.shape[1]))
        # print('scale:',scale)
        for (x, y, roi) in sliding_window(resized, window_step, (w, h)):
            if roi.shape[0] < w or roi.shape[1] < h:
                continue
            rx, ry, rx2, ry2 = int(x * scale), int(y * scale), int(
                (x + w) * scale), int((y + h) * scale)
            hist = hog.compute(roi, (5, 5))[:, 0]

            # compute the result
            _, result = svm.predict(np.array([hist]))
            a, score = svm.predict(np.array([hist]),
                                   flags=cv.ml.STAT_MODEL_RAW_OUTPUT
                                   | cv.ml.STAT_MODEL_UPDATE_MODEL)

            # filter out overly large rectangles
            # if (rx2 - rx > 39 or ry2 - ry > 39) and score[0][0] > -0.1:
            # 	continue
            if result[0][0] == 1 and score[0][0] < -scoreThreshold:
                rectangles.append([rx, ry, rx2, ry2, abs(score[0][0])])
                # save the detected samples (up to frame 224; this is frame 225)
                # ro = cv.cvtColor(roi,cv.COLOR_BGR2GRAY)
                # cv.imwrite('train/' + str(zhen) + str(order) + '.jpg', ro)
                # order += 1
                # print(rx,ry,rx2-rx,ry2-ry,':',-score[0][0])

    # non-maximum suppression
    # print(len(rectangles))
    rectangles = non_max_suppression_fast(np.array(rectangles), 0.1)
    return rectangles
Example No. 18
def run_inference_on_image(image,
                           stepSize,
                           windowSize,
                           node_lookup,
                           fishDict="./data/fishnames.csv"):
    """Runs inference on an image.
  Args:
    image: Image file name.
  Returns:
    Nothing
  """
    # if not tf.gfile.Exists(image):
    #   tf.logging.fatal('File does not exist %s', image)
    # image_data = tf.gfile.FastGFile(image, 'rb').read()

    # Creates graph from saved GraphDef.
    create_graph()

    with tf.Session() as sess:
        # Some useful tensors:
        # 'softmax:0': A tensor containing the normalized prediction across
        #   1000 labels.
        # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
        #   float description of the image.
        # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
        #   encoding of the image.
        # Runs the softmax tensor by feeding the image_data as input to the graph.
        i = 0
        fishnames = pd.read_csv(fishDict).fishname.values
        windowScores = {}
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        for (x, y, window) in sw.sliding_window(image, stepSize, windowSize):
            i += 1
            if i % 100 == 0: print("Processing %dth window" % i)
            predictions = sess.run(softmax_tensor, {'DecodeJpeg:0': window})
            predictions = np.squeeze(predictions)

            # Creates node ID --> English string lookup.

            results = []
            top_k = predictions.argsort()[-5:][::-1]
            for node_id in top_k:
                human_string = node_lookup.id_to_string(node_id)
                score = predictions[node_id]
                #   print('%s (score = %.5f)' % (human_string, score))
                results.append((human_string, score))
            fishscore = 0.0
            for res in results:
                names = res[0].replace(', ', ',').lower().split(',')
                if len(np.intersect1d(names, fishnames)) > 0:
                    fishscore += res[1]
            windowScores[(x, y)] = {'score': fishscore, 'names': names}
        return windowScores
Example No. 19
 def concatenate_same_class(self, data_x, data_y):
     X = np.zeros((1, 24, 113))
     Y = np.zeros((1, 1))
     for i in range(len(data_x)):
         if len(data_x[i]) > 0:
             # Padding
             padding_size = SLIDING_WINDOW_STEP - len(data_x[i]) % SLIDING_WINDOW_STEP \
                 if len(data_x[i]) > SLIDING_WINDOW_LENGTH else SLIDING_WINDOW_LENGTH+SLIDING_WINDOW_STEP-len(data_x[i])
             padding_x = np.zeros((padding_size, NB_SENSOR_CHANNELS))
             data_x[i] = np.row_stack((data_x[i], padding_x))
             data_y[i] = data_y[i].repeat(len(data_x[i]))
             x_temp = sliding_window(
                 data_x[i], (SLIDING_WINDOW_LENGTH, data_x[i].shape[1]),
                 (SLIDING_WINDOW_STEP, 1))
             y_temp = sliding_window(data_y[i], SLIDING_WINDOW_LENGTH,
                                     SLIDING_WINDOW_STEP)
             y_temp = np.asarray([[i[-1]] for i in y_temp])
             x_temp, y_temp = x_temp.astype(np.float32), y_temp.reshape(
                 len(y_temp)).astype(np.uint8)
             X = np.row_stack((X, x_temp))
             Y = np.row_stack((Y, y_temp))
     return X, Y
Example No. 20
def subseqDists(x, y):
    """find the L2^2 distances between y and every subseq of x"""
    y = y.flatten()
    y = (y - np.mean(y)) / np.std(y)

    # flatten Nd input seqs
    origDims = len(x.shape)
    stride = origDims  # TODO allow stepping in more than one direction
    x = x.flatten()

    subseqs = window.sliding_window(x, len(y), stride)
    subseqs = zNormalizeRows(subseqs)

    return distsToRows(subseqs, y)
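subseqDists relies on zNormalizeRows and distsToRows helpers that are not shown here; minimal sketches under the assumption that the distance is squared Euclidean:

import numpy as np

def zNormalizeRows(A, eps=1e-8):
    # z-normalize each row: zero mean, unit variance
    mu = A.mean(axis=1, keepdims=True)
    sigma = A.std(axis=1, keepdims=True)
    return (A - mu) / (sigma + eps)

def distsToRows(A, v):
    # squared L2 distance from v to every row of A
    diffs = A - v
    return np.sum(diffs * diffs, axis=1)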
Example No. 21
def subseqDists(x, y):
	"""find the L2^2 distances between y and every subseq of x"""
	y = y.flatten()
	y = (y - np.mean(y)) / np.std(y)

	# flatten Nd input seqs
	origDims = len(x.shape)
	stride = origDims # TODO allow stepping in more than one direction
	x = x.flatten()

	subseqs = window.sliding_window(x, len(y), stride)
	subseqs = zNormalizeRows(subseqs)

	return distsToRows(subseqs, y)
def vid_pipeline(img):
    global running_avg
    global index
    img_ = pipeline(img)
    img_ = perspective_warp(img_)
    out_img, curves, lanes, ploty = sliding_window(img_, draw_windows=False)
    curverad = get_curve(img, curves[0], curves[1])
    lane_curve = np.mean([curverad[0], curverad[1]])
    img = draw_lanes(img, curves[0], curves[1])

    font = cv2.FONT_HERSHEY_SIMPLEX
    fontColor = (0, 0, 0)
    fontSize = 1
    cv2.putText(img, 'Lane Curvature: {:.0f} m'.format(lane_curve), (500, 620), font, fontSize, fontColor, 2)
    cv2.putText(img, 'Vehicle offset: {:.4f} m'.format(curverad[2]), (500, 670), font, fontSize, fontColor, 2)
    return img
Example No. 23
    def find(self, bwimg):
        # Set current counter
        self.counter += 1
        # Check if we need to re-run sliding window
        reset = False
        if self.counter - self.last_lane_counter >= LaneFinder.MAX_LANE_GAPS:
            reset = True

        last_l_fit = self.last_left_fit
        last_r_fit = self.last_right_fit
        if (reset is True) or (last_l_fit is None) or (last_r_fit is None):
            #self.reset() # reset history, start afresh
            lx, ly, rx, ry = sliding_window.sliding_window(bwimg)
        else:
            lx, ly, rx, ry = sliding_window.targeted_lane_search(
                bwimg, last_l_fit, last_r_fit)

        lval, lfit = LaneFinder.sane_line(ly, lx, last_l_fit)
        rval, rfit = LaneFinder.sane_line(ry, rx, last_r_fit)
        parallel = self.parallel_check(lfit, rfit)
        if lval and rval and parallel:
            self.last_lane_counter = self.counter
            self.save(ly, lx, ry, rx, lfit, rfit)
            lfit = self.smoothen('l')
            rfit = self.smoothen('r')
        elif lval is True and self.last_lane_width_fit is not None:
            self.last_lane_counter = self.counter
            self.partial_save(ly, lx, lfit, 'l')
            lfit = self.smoothen('l')
            rfit = lfit + self.last_lane_width_fit
        elif rval is True and self.last_lane_width_fit is not None:
            self.last_lane_counter = self.counter
            self.partial_save(ry, rx, rfit, 'r')
            rfit = self.smoothen('r')
            lfit = rfit - self.last_lane_width_fit
        elif len(self.leftx) > 0 and len(self.rightx) > 0:
            lfit = self.smoothen('l')
            rfit = self.smoothen('r')

        self.last_left_fit = lfit
        self.last_right_fit = rfit

        left_roc, right_roc = sliding_window.roc(bwimg.shape[0] - 1, lfit,
                                                 rfit)
        dfc = sliding_window.dist_from_center(bwimg, lfit, rfit)
        roc = (left_roc + right_roc) / 2
        return lfit, rfit, roc, dfc, ly, lx, ry, rx
Example No. 24
def find_text_list_append(image):
    result = list()
    image = imutils.resize(image, height=28)
    for (x, y, window) in sliding_window(image,
                                         stepSize=5,
                                         windowSize=(winW, winH)):
        if window.shape[0] != winH or window.shape[1] != winW:
            continue
        a = 27
        letter = list()
        img = Image.fromarray(window)
        img.save('teste.png')
        letter.append(
            pytesseract.image_to_string(Image.open("teste.png"),
                                        config='--psm 10'))
        window2 = window

        while len(letter) > 1 and a > 20:
            a = a - 1
            img = Image.fromarray(window2[:, 0:a])
            img.save('teste.png')
            letter.append(
                pytesseract.image_to_string(Image.open("teste.png"),
                                            config='--psm 10'))

        a = 27
        while len(letter) > 1 and a > 15:
            a = a - 1
            img = Image.fromarray(window2[:, 27 - a:27])
            img.save('teste.png')
            letter.append(
                pytesseract.image_to_string(Image.open("teste.png"),
                                            config='--psm 10'))
        a = 27
        while len(letter) > 1 and a > 5:
            a = a - 1
            img = Image.fromarray(window2[:, 27 - a:a])
            img.save('teste.png')
            letter.append(
                pytesseract.image_to_string(Image.open("teste.png"),
                                            config='--psm 10'))
        print(letter)
        result.append(letter)

    print(result)
    return result
Example No. 25
def specific_window_hists(img, wind_width, wind_height, window_list):
    """
    :param img: Complete image
    :param wind_width: wanted window width
    :param wind_height: wanted window height
    :param window_list: list of 4 (FOUR) window numbers to plot
    :return: plots the windows from window_list and their histograms with 4 distribution moments
    """
    image_counter = 0
    plot_counter = 0

    fig4 = plt.figure(constrained_layout=True)
    spec4 = gridspec.GridSpec(ncols=2, nrows=4, figure=fig4)
    fig4.suptitle('windows and hists')

    for (x, y, window) in sliding_window(img,
                                         step_size=99,
                                         window_size=(wind_width,
                                                      wind_height)):
        if window.shape[0] != wind_height or window.shape[1] != wind_width:
            continue

        if image_counter == window_list[0] or image_counter == window_list[1] or image_counter == window_list[2] \
                or image_counter == window_list[3]:
            n, bins = np.histogram(window.ravel(), 256, [0, 256])
            mids = 0.5 * (bins[1:] + bins[:-1])
            mean = np.average(mids, weights=n)
            var = np.average((mids - mean)**2, weights=n)
            std = np.sqrt(var)
            skewness = np.average((mids - mean)**3, weights=n) / std**3
            kurtosis = np.average((mids - mean)**4, weights=n) / std**4

            ax11 = fig4.add_subplot(spec4[plot_counter, 0])
            ax11.set_title("Window:{}".format(image_counter))
            plt.imshow(window)
            ax12 = fig4.add_subplot(spec4[plot_counter, 1])
            ax12.set_title(
                'mean= {}, var= {}, skewness= {}, kurtosis= {}'.format(
                    round(mean, 2), round(var, 2), round(skewness, 2),
                    round(kurtosis, 2)))
            plt.hist(window.ravel(), 256, [0, 256])
            plot_counter += 1
        image_counter += 1
    plt.show()
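A hypothetical call, plotting four windows of a grayscale image (the file name and the window numbers are placeholders):

import cv2

# hypothetical usage; 'sample.png' and the window numbers are placeholders
img = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)
specific_window_hists(img, wind_width=100, wind_height=100, window_list=[0, 5, 10, 15])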
Example No. 26
def lane_detection(img, size=(100, 100)):
    time_start = time.time()
    img = cv2.resize(img, size)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    time_1 = time.time()
    img_ = pipeline(img)
    time_2 = time.time()
    img_warp = perspective_warp(img_)
    time_3 = time.time()
    out_img, curves, lanes, ploty = sliding_window(img_warp,
                                                   draw_windows=False)
    time_4 = time.time()

    print("\n time_color_channel_conv = " + str(int((time_1-time_start)*1000)) + "\n pipeline_time = "\
          + str(int((time_2-time_1)*1000)) + "\n perspective_warp_time = " \
          + str(int((time_3-time_2)*1000)) + "\n sliding_window_time = " + str(int((time_4-time_3)*1000)))
    print("\n Overall Time = " + str(int((time_4 - time_start) * 1000)))

    return curves, lanes, ploty, out_img
Example No. 27
    def routine(frame, camera_mks_rm):
        global rel_x_ratio, num_data
        processed_frame, explain1 = processing(frame)

        choosen_left, choosen_right, explain2 = sliding_window(processed_frame)

        ret_left, linear_func_left = get_sliding_window_function(choosen_left)
        ret_right, linear_func_right = get_sliding_window_function(
            choosen_right)

        if ret_left and ret_right:
            steering_angle, explain2 = get_steering_angle_from_linear_function(
                (linear_func_left + linear_func_right) / 2, explain2)
            # left = linear_func_left(480)
            # right = linear_func_right(480)
            # print(left, right, right - left)

            # steering_angle = filter_deg.get_moving_average(steering_angle)
        elif ret_left:
            steering_angle, explain2 = get_steering_angle_from_linear_function(
                linear_func_left, explain2, rel_x_ratio=1.2)
            # steering_angle = filter_deg.get_moving_average(steering_angle)
        elif ret_right:
            steering_angle, explain2 = get_steering_angle_from_linear_function(
                linear_func_right, explain2, rel_x_ratio=0.8)
            # steering_angle = filter_deg.get_moving_average(steering_angle)
        else:
            steering_angle = filter_deg2.prev_lpf
            # steering_angle = filter_deg.prev_avg
        steering_angle = filter_deg2.get_lpf(steering_angle)[0]

        _, explain2 = tfcp.get_segment(explain2, camera_mks_rm,
                                       linear_func_left, linear_func_right)

        # ret_line, _ = tfcp.get_line(explain2, num_data, camera_mks_rm)

        # Merge Explain
        vertical_line = np.zeros((explain1.shape[0], 5, 3), dtype=np.uint8)
        explain_merge = np.hstack((explain2, vertical_line, explain1))

        return steering_angle, explain_merge
Example No. 28
def makeBlocks(blockW, blockH, step, imageName):
	#init blockW and blockH should be parameters and step
	#import pdb; pdb.set_trace()
	#image = cv2.imread(imageName, cv2.CV_LOAD_IMAGE_GRAYSCALE)
	image = cv2.imread(imageName)
	image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
	start_time = time.time()
	
	blockArr = []
	i = 0
	for (x, y, window) in sliding_window.sliding_window(image, stepSize = step, windowSize=(blockW, blockH)):
		if window.shape[0] != blockH or window.shape[1] != blockW:
			continue #if the block is too small should I proccess it?

		#going to assume that the block is always a square
		#otherwise I'll need another parameter for the windowOb
		window = window[:, :, np.newaxis]
		window = skimage.img_as_float(window).astype(np.float32) 			
		blockArr.append(window.copy())

		i += 1
	#print ("---%s seconds ---" %(time.time() - start_time))                                                        
	return blockArr	
Example No. 29
def find_best(image):
    result = ''

    image = imutils.resize(image, height=28)
    for (x, y, window) in sliding_window(image,
                                         stepSize=10,
                                         windowSize=(winW, winH)):
        if window.shape[0] != winH or window.shape[1] != winW:
            continue
        a = 27
        img = Image.fromarray(window)
        img.save('teste.png')
        letter = pytesseract.image_to_string(Image.open("teste.png"),
                                             config='--psm 10')
        window2 = window
        if len(letter) > 3:
            a = 27
            while len(letter) > 3 and a > 15:
                a = a - 1
                img = Image.fromarray(window2[:, 0:a])
                img.save('teste.png')
                letter = pytesseract.image_to_string(Image.open("teste.png"),
                                                     config='--psm 10')
        if len(letter) > 3:
            a = 27
            while len(letter) > 1 and a > 15:
                a = a - 1
                img = Image.fromarray(window2[:, 27 - a:27])
                img.save('teste.png')
                letter = pytesseract.image_to_string(Image.open("teste.png"),
                                                     config='--psm 10')
        result = result + letter[0:len(letter) - 2]
        # print ( a )
        # print( letter )
    # print( "threshould: " + str(threshould) )
    print(result)
    return result
Example No. 30
    def routine(frame):
        global rel_x_ratio
        processed_frame, explain1 = processing(frame)

        choosen_left, choosen_right, explain2 = sliding_window(processed_frame)

        ret_left, linear_func_left = get_sliding_window_function(choosen_left)
        ret_right, linear_func_right = get_sliding_window_function(choosen_right)

        if ret_left and ret_right:
            steering_angle, explain2 = get_steering_angle_from_linear_function((linear_func_left + linear_func_right)/2, explain2)
            # left = linear_func_left(480)
            # right = linear_func_right(480)
            # print(left, right, right - left)
            
            ## distance ~= 435.662 PIXEL or 0.845m 
            ## 515.58 PIXEL / m
            ## Offset from lidar to ROI : 0.400 m
            
            # steering_angle = filter_deg.get_moving_average(steering_angle)
        elif ret_left:
            steering_angle, explain2 = get_steering_angle_from_linear_function(linear_func_left, explain2, rel_x_ratio=1.2)
            # steering_angle = filter_deg.get_moving_average(steering_angle)
        elif ret_right:
            steering_angle, explain2 = get_steering_angle_from_linear_function(linear_func_right, explain2, rel_x_ratio=0.8)
            # steering_angle = filter_deg.get_moving_average(steering_angle)
        else:
            steering_angle = filter_deg2.prev_lpf
            # steering_angle = filter_deg.prev_avg
        steering_angle = filter_deg2.get_lpf(steering_angle)[0]

        # Merge Explain
        vertical_line = np.zeros((explain1.shape[0], 5, 3), dtype=np.uint8)
        explain_merge = np.hstack((explain2, vertical_line, explain1))

        return steering_angle, explain_merge
Example No. 31
        os.makedirs(DataPath)

    os.system("find /Users/kate/PycharmProjects/VGGUnet-predict -name '.DS_Store' -delete")
    path_orig = DataPath + 'orig/'
    if not os.path.exists(path_orig):
        os.makedirs(path_orig)
    path_crop = DataPath + 'crop/'
    if not os.path.exists(path_crop):
        os.makedirs(path_crop)

    os.system("find /Users/kate/PycharmProjects/VGGUnet-predict -name '.DS_Store' -delete")

    path_orig = DataPath + 'orig/'
    path_crop = DataPath + 'crop/'

    step_x, step_y = sliding_window(path_orig, path_crop)

    print("step_x, step_y: ", step_x, step_y)
    create_predict(path_crop, path_crop, h, w, weights_path, n_classes)
    os.system("find /Users/kate/PycharmProjects/VGGUnet-predict -name '.DS_Store' -delete")

    merge_im(path_crop, path_crop, step_x, step_y, 1, True)

    k = number_of_splices(path_crop)
    for i in range(k):
        merge_im(path_crop, path_crop, step_x, step_y, 1, False)

    print("теперь по вертикали!\n")
    files = os.listdir(path_crop)

    while len(files) > 1:
def find_all_unique_kmers(text, size):
  return list(window for window in sliding_window(text, size))
                            gray_res = cv2.cvtColor(color_res, cv2.COLOR_BGR2GRAY)
                            cv2.imwrite("/home/sarbajit/PyCharm_Scripts/test/back_project_test/results_gray_temp/" + item, gray_res)
                            r1,r2,c1,c2,length_contour, row_height, col_width = contour_finding(item,item_roi)



                            if col_width < 175:
                                if os.path.isfile('//home/sarbajit/PycharmProjects/BeeHive/clusters_contour/'+item_roi+'/'+item):
                                    os.remove('/home/sarbajit/PycharmProjects/BeeHive/clusters_contour/'+item_roi+'/'+item)
                            else:
                                if not os.path.exists(cluster_path_cropped+item_roi):
                                    os.makedirs(cluster_path_cropped+item_roi)
                                im_cropped = im_cropped[r1:r2,c1:c2]
                                cv2.imwrite(cluster_path_cropped+item_roi+'/'+item, im_cropped)

                                sliding_window(item, r1, r2, row_height)

                                if not os.path.exists(cluster_path_cropped_full+item_roi):
                                    os.makedirs(cluster_path_cropped_full+item_roi)

                                if ((c1>40)or(c2<335)):
                                    c1 = c1_ideal
                                    c2 = c2_ideal

                                im_cropped2 = im_cropped2[r1:r2,c1:c2]
                                cv2.imwrite(cluster_path_cropped_full+item_roi+'/'+item, im_cropped2)




Example No. 34
 for j in range(len(nonbreath_files)):
     print('\nReading file (non-breathing) number', str(j + 1))
     nonbreath_file = nonbreath_files[j]
     temp_nb, temp_sr_nb = librosa.load(nonbreath_file, duration = 8) # if limit is wanted, duration = 8
     temp_dur = temp_nb.shape[0] / float(temp_sr_nb)
     nb_dur = nb_dur + temp_dur
     print('\tFile (non-breathing) sampling rate :', str(temp_sr_nb))
     print('\tFile (non-breathing) duration :', "{0:.2f}".format(temp_dur), 'seconds')
     nb = np.append(nb, temp_nb)  
     
 print('\n\tTotal duration (breathing) :', "{0:.2f}".format(b_dur), 'seconds')
 print('\n\tTotal duration (non-breathing) :', "{0:.2f}".format(nb_dur), 'seconds') 
 
 # windowing
 window_len = 1024
 b_feat = sliding_window(b, window_len, window_len // 2)
 nb_feat = sliding_window(nb, window_len, window_len // 2)
 
 # zero mean scaling within each window
 b_feat = b_feat - np.transpose(np.tile(np.mean(b_feat, axis = 1), (b_feat.shape[1], 1)))
 nb_feat = nb_feat - np.transpose(np.tile(np.mean(nb_feat, axis = 1), (nb_feat.shape[1], 1)))
 
 '''# unity variance scaling
 b_feat = b_feat / np.transpose(np.tile(np.std(b_feat, axis = 1), (b_feat.shape[1], 1)))
 nb_feat = nb_feat / np.transpose(np.tile(np.std(nb_feat, axis = 1), (nb_feat.shape[1], 1)))'''  
 
 # fft features
 b_feat = np.abs(np.fft.fft(b_feat, axis = 1))
 nb_feat = np.abs(np.fft.fft(nb_feat, axis = 1))
 all_feats = np.vstack((b_feat, nb_feat))
 
Example No. 35
cv2.namedWindow('Sliding Window', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Sliding Window', image.shape[1] // 2, image.shape[0] // 2)

# Image pyramid parameters
scale = 2.0
minSize = (500, 500)
# Sliding window parameters
stepSize = 16
(winW, winH) = (64, 64)

bboxes = np.zeros(4, np.int64)  # Variable to save the resulting bounding boxes
# loop over the image pyramid
for i, resized in enumerate(pyramid(image, scale=scale, minSize=minSize)):
    # loop over the sliding window for each layer of the pyramid
    for (x, y, window) in sliding_window(resized,
                                         stepSize=stepSize,
                                         windowSize=(winW, winH)):
        # if the window does not meet our desired window size, ignore it
        if window.shape[0] != winH or window.shape[1] != winW:
            continue

        # Draw sliding Window
        clone = resized.copy()
        cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)

        # Cropped the resized image using x,y,winW, winH
        cropped_img = resized[y:y + winH, x:x + winW]
        # Resize it so the HOG descriptor can be obtained
        cropped_img_resized = cv2.resize(cropped_img, winSize)
        # Compute the HOG descriptor
        descriptor = np.transpose(hog.compute(cropped_img_resized))
Example No. 36
# split the image into channels and create the exBR composite image, removing the green channel.
# Result is converted to HSV
b, g, r = cv2.split(image)
merged = cv2.merge((b, b, r))
hsv = cv2.cvtColor(merged, cv2.COLOR_BGR2HSV)
display = image.copy()

# set the sliding window parameters
STEPH = int(image.shape[0] / 10)
WINH = int(image.shape[0] / 10)
STEPW = int(image.shape[1] / 10)
WINW = int(image.shape[1] / 10)

# create sliding windows
for (winID, x, y, imageOut) in sliding_window(hsv,
                                              stepSizeH=STEPH,
                                              stepSizeW=STEPW,
                                              windowSize=(WINW, WINH)):
    cv2.rectangle(display, (x, y), (x + WINW, y + WINH), (0, 255, 0), 2)
    if imageOut.shape[0] != WINH or imageOut.shape[1] != WINW:
        continue

    # if SLIDERS is TRUE open up the masked image and allow user input to change threshold values on sliders
    while SLIDERS:
        thresh, paramsDict = colour_mask(imageOut, segmentationWindow)
        masked = cv2.bitwise_and(imageOut, imageOut, mask=thresh)
        cv2.imshow("slider", masked)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            # save the parameters to file for next time
            with open('parameters.json', 'w+') as f:
                json.dump(paramsDict, f, indent=4)
Example No. 37
            nonbreath_file, duration=8)  # if limit is wanted, duration = 8
        temp_dur = temp_nb.shape[0] / float(temp_sr_nb)
        nb_dur = nb_dur + temp_dur
        print('\tFile (non-breathing) sampling rate :', str(temp_sr_nb))
        print('\tFile (non-breathing) duration :', "{0:.2f}".format(temp_dur),
              'seconds')
        nb = np.append(nb, temp_nb)

    print('\n\tTotal duration (breathing) :', "{0:.2f}".format(b_dur),
          'seconds')
    print('\n\tTotal duration (non-breathing) :', "{0:.2f}".format(nb_dur),
          'seconds')

    # windowing
    window_len = 1024
    b_feat = sliding_window(b, window_len, window_len // 2)
    nb_feat = sliding_window(nb, window_len, window_len // 2)

    # zero mean scaling within each window
    b_feat = b_feat - np.transpose(
        np.tile(np.mean(b_feat, axis=1), (b_feat.shape[1], 1)))
    nb_feat = nb_feat - np.transpose(
        np.tile(np.mean(nb_feat, axis=1), (nb_feat.shape[1], 1)))
    '''# unity variance scaling
    b_feat = b_feat / np.transpose(np.tile(np.std(b_feat, axis = 1), (b_feat.shape[1], 1)))
    nb_feat = nb_feat / np.transpose(np.tile(np.std(nb_feat, axis = 1), (nb_feat.shape[1], 1)))'''

    # fft features
    b_feat = np.abs(np.fft.fft(b_feat, axis=1))
    nb_feat = np.abs(np.fft.fft(nb_feat, axis=1))
    all_feats = np.vstack((b_feat, nb_feat))
Example No. 38
def gen_td_examples(df,win_size,over_lap,headers,label):
    df = df[(df['label'] == label)]
    examples = [sliding_window(np.array(df[c]),win_size,over_lap) for c in headers]
    return (examples,[label] * len(examples[0]))
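gen_td_examples assumes a DataFrame with a 'label' column plus one column per channel listed in headers; a hypothetical call:

import numpy as np
import pandas as pd

# hypothetical DataFrame with two sensor channels and integer labels
df = pd.DataFrame({
    "acc_x": np.random.randn(1000),
    "acc_y": np.random.randn(1000),
    "label": np.random.randint(0, 3, size=1000),
})
examples, labels = gen_td_examples(df, win_size=128, over_lap=64,
                                   headers=["acc_x", "acc_y"], label=1)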
Example No. 39
import argparse
import time

import cv2
import pdb
import numpy as np
import classifier
import skimage
import sliding_window
###
pdb.set_trace()
net = classifier.classifier() 
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image and define the window width and height
image = cv2.imread(args["image"])
(winW, winH) = (64,64)
for (x,y,window) in sliding_window.sliding_window(image,stepSize = 8, windowSize=(winW, winH)):
	if window.shape[0] != winH or window.shape[1] != winW:
		continue
#	window = window[:, :, np.newaxis]
	w = skimage.img_as_float(window).astype(np.float32)
	prediction = net.predict([w],oversample=False)
	print(x, y, prediction[0][1])
	cv2.imshow("Window",window)	
	clone = image.copy()
	if prediction[0][1] >.9:
		cv2.circle(image,(x+32,y+32),4,(0,0,255),-1)	
	cv2.rectangle(clone,(x,y),(x+winW,y+winH),(255,255,0),2)
	cv2.imshow("Window", clone)
	cv2.waitKey(1)
	time.sleep(.005) 		
 for j in range(len(nonbreath_files)):
     print('\nReading file (non-breathing) number', str(j + 1))
     nonbreath_file = nonbreath_files[j]
     temp_nb, temp_sr_nb = librosa.load(nonbreath_file, duration = 8) # too much nonbreath
     temp_dur = temp_nb.shape[0] / float(temp_sr_nb)
     nb_dur = nb_dur + temp_dur
     print('\tFile (non-breathing) sampling rate :', str(temp_sr_nb))
     print('\tFile (non-breathing) duration :', "{0:.2f}".format(temp_dur), 'seconds')
     nb = np.append(nb, temp_nb)  
     
 print('\n\tTotal duration (breathing) :', "{0:.2f}".format(b_dur), 'seconds')
 print('\n\tTotal duration (non-breathing) :', "{0:.2f}".format(nb_dur), 'seconds') 
 
 # windowing
 window_len = 1024
 b_feat = sliding_window(b, window_len)
 nb_feat = sliding_window(nb, window_len)
 
 # zero mean scaling within each window
 b_feat = b_feat - np.transpose(np.tile(np.mean(b_feat, axis = 1), (b_feat.shape[1], 1)))
 nb_feat = nb_feat - np.transpose(np.tile(np.mean(nb_feat, axis = 1), (nb_feat.shape[1], 1)))
 
 # fft features
 b_feat = np.abs(np.fft.fft(sliding_window(b, window_len), axis = 1))
 nb_feat = np.abs(np.fft.fft(sliding_window(nb, window_len), axis = 1))
 all_feats = np.vstack((b_feat, nb_feat))
 
 # create targets
 breath_targets = np.ones(b_feat.shape[0])
 nonbreath_targets = np.zeros(nb_feat.shape[0])
 all_targets = np.hstack((breath_targets, nonbreath_targets))