Example #1
def agcnn_detect_from_yolo(image, yolo_results, agcnn_models, method='opencv'):
    results = []
    count = 0
    for r in yolo_results:
        (x, y, w, h) = r['box']
        m = 20  # margin
        # keep the central half of the box horizontally
        (x, y, w, h) = (x + w / 4, y, w / 2, h)
        # expand the box by the margin on all sides
        (x, y, w, h) = (x - m, y - m, w + 2 * m, h + 2 * m)
        box = [int(v) for v in (x, y, w, h)]
        face_img = helper.crop(image, box)
        count = (count + 1) % 5
        # cv2.imshow('face'+str(count),face_img)

        # face = agcnn_predict(face_img, *agcnn_models[0:-1], method=method)
        face = agcnn_detect(
            face_img,
            r,
            agcnn_models,
            origin=box[0:2],
            method=method,
            margin=15,
        )
        if face is None:
            continue
        r['face'] = face
        results.append(face)
    return results
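A minimal usage sketch (hypothetical: the dummy box value and the printed keys are inferred from the code above and from agcnn_detect in Example #14, not part of the original):

# each YOLO result is expected to carry a 'box' tuple of (x, y, w, h)
yolo_results = [{'box': (120, 80, 200, 400)}]
faces = agcnn_detect_from_yolo(image, yolo_results, agcnn_models,
                               method='opencv')
for face in faces:
    print(face['box'], face['age_average'], face['gender_average'])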
Example #2
def preview_small_pores_detection_full(img2d_gray,
                                       percentile=2.5,
                                       min_large_contour_length=2000,
                                       window_size=200):

    img_without_large_contours = remove_large_contours(
        img2d_gray, min_large_contour_length=min_large_contour_length)
    global_thresh = np.percentile(img_without_large_contours.ravel(),
                                  percentile)

    #TODO: make image sampling more flexible
    count_of_center_points = np.min(img2d_gray.shape) // window_size

    frame_for_new_approach_img = np.zeros(
        [count_of_center_points * window_size] * 2, dtype=int)

    # window centers sit at ceil((i + 0.5) * window_size) for i = 0..n-1
    for x in np.arange(count_of_center_points) + 0.5:
        for y in np.arange(count_of_center_points) + 0.5:
            center_coords = np.ceil(np.asarray([x, y]) *
                                    window_size).astype(int)
            img_without_contours_frag = crop(img_without_large_contours,
                                             (window_size, window_size),
                                             center_coords)

            # new approach
            img_without_contours_frag = median(img_without_contours_frag)

            min_brightness = np.min(img_without_contours_frag)
            max_brightness = np.max(img_without_contours_frag)

            local_thresh = min_brightness + (max_brightness -
                                             min_brightness) * 0.5
            if local_thresh > global_thresh:
                local_thresh = global_thresh

            bin_cropped_fragment = img_without_contours_frag > local_thresh
            paste(frame_for_new_approach_img, bin_cropped_fragment,
                  center_coords)

    # fig, axes = plt.subplots(ncols=2, figsize=(14, 7), constrained_layout=True)
    # [ax.axis("off") for ax in axes]
    # [ax.imshow(img2d_gray, cmap=plt.cm.gray) for ax in axes]

    # mask_new = np.ma.masked_where(frame_for_new_approach_img, frame_for_new_approach_img)

    # axes[0].set_title("original image", fontsize=25)

    # axes[1].imshow(mask_new, cmap='hsv', interpolation='none')
    # axes[1].set_title("new method", fontsize=25)

    fig, ax = plt.subplots(figsize=(20, 20), constrained_layout=True)
    ax.axis("off")
    ax.imshow(img2d_gray, cmap=plt.cm.gray)

    mask_new = np.ma.masked_where(frame_for_new_approach_img,
                                  frame_for_new_approach_img)
    ax.imshow(mask_new, cmap='hsv', alpha=0.2, interpolation='none')
    ax.set_title("new method", fontsize=25)

    return fig
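A minimal sketch of calling this preview, assuming img2d_gray is a 2-D grayscale numpy array loaded elsewhere:

fig = preview_small_pores_detection_full(img2d_gray,
                                         percentile=2.5,
                                         window_size=200)
fig.savefig('small_pores_preview.png')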
Example #3
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        image_array = helper.crop(image_array, 0.35, 0.1)
        image_array = helper.resize(image_array, new_dim=(64, 64))

        transformed_image_array = image_array[None, :, :, :]

        steering_angle = float(
            model.predict(transformed_image_array, batch_size=1))

        throttle = controller.update(float(speed))

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
Example #4
def telemetry(sid, data):
    # The current steering angle of the car
    steering_angle = data["steering_angle"]

    # The current throttle of the car
    throttle = data["throttle"]

    # The current speed of the car
    speed = data["speed"]

    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)

    image_array = helper.crop(image_array, 0.35, 0.1)
    image_array = helper.resize(image_array, new_dim=(64, 64))

    transformed_image_array = image_array[None, :, :, :]

    # This model currently assumes that the features of the model are just the images. Feel free to change this.

    steering_angle = float(model.predict(transformed_image_array,
                                         batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.3

    print('{:.5f}, {:.1f}'.format(steering_angle, throttle))

    send_control(steering_angle, throttle)
Example #5
def process_image(image,
                  num_of_angles,
                  noise_parameter=0,
                  noise_method=None,
                  reconstruct_sart=False,
                  reconstruct_filter='ramp',
                  detector_blurring=False,
                  source_blurring=False):
    sim = create_sinogram(num_of_angles, image)
    print(f'sinogram shape: {sim.shape}')
    if noise_method == 'poisson':
        # sim = add_poisson_noise(sim, noise_parameter)
        sim, _ = add_poisson_noise_physical(
            sim,
            nominal_intensity=noise_parameter,
            detector_blurring=detector_blurring)
    elif noise_method is not None:
        raise ValueError(f'unknown noise_method param: {noise_method!r}')
    # if source_blurring:
    #     sim = sinogram_source_blurring(sim)
    # if detector_blurring:
    #     sim = sinogram_detector_blurring(sim)
    rec = reconstruct(sim, reconstruct_sart, reconstruct_filter)
    print(f'reconstruction shape: {rec.shape}')
    return crop(rec, image.shape)
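A hypothetical call with illustrative parameter values, assuming image is a 2-D numpy array and that crop(rec, image.shape) trims the reconstruction back to the input shape:

rec = process_image(image,
                    num_of_angles=180,
                    noise_parameter=1e4,
                    noise_method='poisson')
print(rec.shape == image.shape)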
Example #6
def crop_cube(sample_name, shape_2d, center_2d, slice_numbers):
    n_slices = slice_numbers[1] - slice_numbers[0]
    data = dm.load_data_server(sample_name, slice_numbers)

    for img, shot_name in tqdm(data, total=n_slices):
        img = crop(img, shape_2d, center=center_2d)

        dm.save_tif(img, "crop_" + sample_name, shot_name)
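A hypothetical call, assuming slice_numbers is a (start, stop) pair as implied by the n_slices computation above:

crop_cube('my_sample', shape_2d=(2000, 2000), center_2d=(1024, 1024),
          slice_numbers=(0, 100))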
Example #7
def get_2d_slice(num, file_id):
    data_folder = get_path(file_id)

    # glob() yields files in arbitrary order, so sort before indexing
    file_names = sorted(Path(data_folder).glob('*.tiff'))
    img2d_gray = np.array(Image.open(file_names[num]))

    return crop(img2d_gray, (2000, 2000))
Example #8
def get_2d_mask_binary_closing(img2d,
                               pad_width=35,
                               disk_radius=35,
                               zoom_scale=0.1):
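    # Perform the closing at reduced resolution: downscale, pad so the disk
    # structuring element does not clip at the borders, close, then zoom the
    # mask back up and crop it to the original shape.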
    merged_img = zoom(img2d, zoom_scale, order=1)
    result_paded = np.pad(merged_img,
                          pad_width=((pad_width, pad_width), (pad_width,
                                                              pad_width)),
                          mode='constant')
    img_mask = binary_closing(result_paded, structure=disk(disk_radius))
    return crop(zoom(img_mask, 1 / zoom_scale, order=1), img2d.shape)
Example #9
def glblstm(wins):
    InfoOfCys = 24
    hiddenUnit = 30
    CysNum = 25
    inputOfSeq = Input((CysNum, wins, InfoOfCys))
    listOfCysRegion = helper.crop()(inputOfSeq)

    maskingLayerForLocalRegion = (Masking(mask_value=0,
                                          input_shape=(wins, InfoOfCys)))

    localBLSTM = Bidirectional(
        LSTM(hiddenUnit,
             activation='relu',
             kernel_initializer=initializers.glorot_normal(),
             recurrent_initializer=initializers.glorot_normal()))

    listOfCysRegionAfterMASK = [
        maskingLayerForLocalRegion(region) for region in listOfCysRegion
    ]

    hiddens = [
        Reshape(target_shape=(1, hiddenUnit * 2))(localBLSTM(region))
        for region in listOfCysRegionAfterMASK
    ]

    inputForGlobal = concatenate(hiddens, axis=1)

    inputForGlobalAfterMask = (Masking(mask_value=0,
                                       input_shape=(CysNum, hiddenUnit *
                                                    2)))(inputForGlobal)
    inputForGlobalAfterMaskAfterBn = BatchNormalization()(
        inputForGlobalAfterMask)

    globalBLSTM = Bidirectional(
        LSTM(hiddenUnit,
             activation='relu',
             return_sequences=True,
             dropout=0.2,
             recurrent_regularizer=regularizers.l2(0.005),
             kernel_regularizer=regularizers.l2(0.005),
             bias_regularizer=regularizers.l2(0.005),
             recurrent_dropout=0.2))(inputForGlobalAfterMaskAfterBn)

    output = TimeDistributed(Dense(2, activation='softmax'))(globalBLSTM)
    model = Model(inputs=[inputOfSeq], outputs=[output])

    ## 'acc' is the built-in Keras metric; in our scenario it measures accuracy at the residue level
    ## helper.acc_on_protein is a custom metric we implement to measure accuracy at the protein level
    model.compile(optimizer='adam',
                  loss={'time_distributed_1': 'binary_crossentropy'},
                  metrics=['acc', helper.acc_on_protein])
    return model
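A minimal usage sketch (wins=15 is an illustrative window size, not from the original):

model = glblstm(wins=15)
model.summary()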
Example #10
def get_small_pores_mask(img2d_gray,
                         mask,
                         percentile_glob=2.5,
                         min_large_contour_length=2000,
                         window_size=200):

    # remove large contours
    large_clusters_mask = find_big_ones_clusters(mask,
                                                 min_large_contour_length)
    img2d_gray = hide_image_elements(img2d_gray, large_clusters_mask)
    global_thresh = np.percentile(img2d_gray.ravel(), percentile_glob)

    check_mask = find_big_ones_clusters(img2d_gray > global_thresh, min_large_contour_length)
    while np.any(check_mask):
        img2d_gray = hide_image_elements(img2d_gray, check_mask)
        check_mask = find_big_ones_clusters(img2d_gray > global_thresh, min_large_contour_length)
        print("deleting remnants")

    # number of small windows along each axis
    count_of_center_points_x, count_of_center_points_y = (
        np.array(img2d_gray.shape) // window_size)

    # discard everything outside the maximal grid of windows and keep only
    # that frame, in case the windows do not tile the image exactly
    frame_shape = [count_of_center_points_x * window_size,
                   count_of_center_points_y * window_size]
    mask_frame = np.zeros(frame_shape, dtype=int)

    for x in np.arange(count_of_center_points_x) + 0.5:
        for y in np.arange(count_of_center_points_y) + 0.5:
            center_coords = np.ceil(np.asarray([x, y]) * window_size).astype(int)
            img2d_gray_frag = crop(img2d_gray, (window_size, window_size), center_coords)

            img2d_gray_frag = median(img2d_gray_frag)

            min_brightness = np.min(img2d_gray_frag)
            max_brightness = np.max(img2d_gray_frag)

            # local threshold: midpoint of the window's brightness range,
            # clamped from below by the global threshold
            local_thresh = (min_brightness + max_brightness) * 0.5
            if local_thresh < global_thresh:
                local_thresh = global_thresh

            bin_cropped_fragment = img2d_gray_frag < local_thresh
            paste(mask_frame, bin_cropped_fragment, center_coords)
    
    # invert the binary frame, then fill the enclosed holes
    mask_frame = binary_fill_holes(np.abs(mask_frame - 1))

    check_mask = find_big_ones_clusters(mask_frame, min_large_contour_length)
    while np.any(check_mask):
        mask_frame = hide_image_elements(mask_frame, check_mask)
        check_mask = find_big_ones_clusters(mask_frame, min_large_contour_length)
        print("deleting remnants")

    return mask_frame.astype(bool)
Example #11
def divide_image_into_cubic_fragments(img, edge_size):
    count_of_center_points = np.asarray(img.shape) // edge_size
    img_fragments = []
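    # the +1 in the ranges below adds one extra row/column of windows so the
    # remainder is covered when edge_size does not divide the image exactly
    # (assuming crop() tolerates windows that extend past the border)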

    if img.ndim == 2:
        for x_coord in np.arange(count_of_center_points[0] + 1) + 0.5:
            for y_coord in np.arange(count_of_center_points[1] + 1) + 0.5:
                center_coords = np.ceil(
                    np.asarray([x_coord, y_coord]) * edge_size).astype(int)
                img_fragment = crop(img, (edge_size, edge_size), center_coords)
                img_fragments.append(img_fragment)
    elif img.ndim == 3:
        for x_coord in np.arange(count_of_center_points[0] + 1) + 0.5:
            for y_coord in np.arange(count_of_center_points[1] + 1) + 0.5:
                for z_coord in np.arange(count_of_center_points[2] + 1) + 0.5:
                    center_coords = np.ceil(
                        np.asarray([x_coord, y_coord, z_coord]) *
                        edge_size).astype(int)
                    img_fragment = crop(img, (edge_size, edge_size, edge_size),
                                        center_coords)
                    img_fragments.append(img_fragment)

    return img_fragments
Example #12
def image_split(path, FACTOR, PATCH_SIZE, STRIDE):

    x_train = []
    y_train = []
    for file in os.listdir(path):

        # read the file using cv2
        hr = cv2.imread(path + '/' + file)

        # find the old and new image dimensions
        h, w, c = hr.shape

        # change the image color channel to YCrCb
        hr = cv2.cvtColor(hr, cv2.COLOR_BGR2YCrCb)
        hr = hr[:, :, 0]

        # degrade the images by downsizing and upsizing
        new_h = int(h / FACTOR)
        new_w = int(w / FACTOR)
        lr = cv2.resize(hr, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        lr = cv2.resize(lr, (w, h), interpolation=cv2.INTER_LINEAR)

        # number of stride steps
        w_steps = int((w - (PATCH_SIZE - STRIDE)) / STRIDE)
        h_steps = int((h - (PATCH_SIZE - STRIDE)) / STRIDE)

        #print('w: {}'.format(w))
        #print('h: {}'.format(h))
        #print('w_steps: {}'.format(w_steps))
        #print('h_steps: {}'.format(h_steps))

        hr = hr.astype(float) / 255
        lr = lr.astype(float) / 255

        for i in range(w_steps):
            for j in range(h_steps):

                hr_patch = hr[j * STRIDE:j * STRIDE + PATCH_SIZE,
                              i * STRIDE:i * STRIDE + PATCH_SIZE]
                lr_patch = lr[j * STRIDE:j * STRIDE + PATCH_SIZE,
                              i * STRIDE:i * STRIDE + PATCH_SIZE]

                if hr_patch.shape[0] == hr_patch.shape[1]:
                    x_train.append(lr_patch)
                    # crop the HR target border (likely to match the
                    # valid-convolution output size of the network)
                    y_train.append(crop(hr_patch, 4))

    x_train = np.array(x_train, dtype=float)
    y_train = np.array(y_train, dtype=float)

    return x_train[..., np.newaxis], y_train[..., np.newaxis]
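A hypothetical call (the folder name and hyperparameter values are illustrative, not from the original):

x_train, y_train = image_split('train_images', FACTOR=2,
                               PATCH_SIZE=33, STRIDE=14)
print(x_train.shape, y_train.shape)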
Example #13
def telemetry(sid, data):
    # decode the incoming center-camera image, as in the other telemetry
    # examples in this listing
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)

    image_array = helper.crop(image_array, 0.35, 0.1)
    image_array = helper.resize(image_array, new_dim=(64, 64))

    transformed_image_array = image_array[None, :, :, :]

    # This model currently assumes that the features of the model are just the images. Feel free to change this.

    steering_angle = float(model.predict(transformed_image_array,
                                         batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.3

    print('{:.5f}, {:.1f}'.format(steering_angle, throttle))

    send_control(steering_angle, throttle)
Example #14
def agcnn_detect(image,
                 yolo_result,
                 models,
                 origin=(0, 0),
                 margin=0,
                 method='opencv'):
    faces = agcnn_face(image, models[-1])
    if len(faces) > 0:
        m = margin
        (x, y, w, h) = faces[0]
        box = (x - m, y - m, w + 2 * m, h + 2 * m)
        image = helper.crop(image, box)
        result = agcnn_predict(image, *(models[0:-1]), method=method)
        result['box'] = (x + origin[0], y + origin[1], w, h)
        result['confidence'] = w * h
        if 'face' in yolo_result:
            lr = yolo_result['face']  # result from the previous frame
            cr = result  # result from the current frame
            total_conf = cr['confidence'] + lr['confidence']
            # age: confidence-weighted running average
            weighted_age = (cr['age'] * cr['confidence'] +
                            lr['age_average'] * lr['confidence'])
            result['age_average'] = weighted_age / total_conf
            # gender: confidence-weighted vote between the two labels
            cur_vote = 1 if cr['gender'] == gender_list[0] else -1
            last_vote = 1 if lr['gender_average'] == gender_list[0] else -1
            gender_score = (cur_vote * cr['confidence'] +
                            last_vote * lr['confidence'])
            result['gender_average'] = gender_list[gender_score < 0]
            result['confidence'] = total_conf
        else:
            result['age_average'] = result['age']
            result['gender_average'] = result['gender']
        return result
    # no face found in this crop: fall back to the previous result, if any
    return yolo_result.get('face')
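A worked example of the confidence-weighted smoothing above (illustrative numbers): a previous face with age_average 30 at confidence 10000, combined with a current detection of age 40 at confidence 5000, yields (40 * 5000 + 30 * 10000) / 15000 = 33.3, so higher-confidence (larger) detections dominate the average.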
Example #15
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]

        # The current throttle of the car
        throttle = data["throttle"]

        # The current speed of the car
        speed = data["speed"]

        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)

        image_array = helper.crop(image_array, 0.35, 0.1)
        image_array = helper.resize(image_array, new_dim=(64, 64))

        transformed_image_array = image_array[None, :, :, :]

        # This model currently assumes that the features of the model are just the images. Feel free to change this.

        steering_angle = float(
            model.predict(transformed_image_array, batch_size=1))
        # The driving model currently just outputs a constant throttle. Feel free to edit this.
        throttle = 0.3

        print('{:.5f}, {:.1f}'.format(steering_angle, throttle))
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
Example #16
def get_small_pores_mask(img2d_gray,
                         percentile=2.5,
                         min_large_contour_length=2000,
                         window_size=200):

    img_without_large_contours = remove_large_contours(
        img2d_gray, min_large_contour_length=min_large_contour_length)
    global_thresh = np.percentile(img_without_large_contours.ravel(),
                                  percentile)

    #TODO: make image sampling more flexible
    count_of_center_points = np.min(img2d_gray.shape) // window_size
    mask_frame = np.zeros([count_of_center_points * window_size] * 2,
                          dtype=int)

    for x in np.arange(count_of_center_points) + 0.5:
        for y in np.arange(count_of_center_points) + 0.5:
            center_coords = np.ceil(np.asarray([x, y]) *
                                    window_size).astype(int)
            img_without_contours_frag = crop(img_without_large_contours,
                                             (window_size, window_size),
                                             center_coords)

            # new approach
            img_without_contours_frag = median(img_without_contours_frag)

            min_brightness = np.min(img_without_contours_frag)
            max_brightness = np.max(img_without_contours_frag)

            local_thresh = min_brightness + (max_brightness -
                                             min_brightness) * 0.5
            if local_thresh > global_thresh:
                local_thresh = global_thresh

            bin_cropped_fragment = img_without_contours_frag > local_thresh
            paste(mask_frame, bin_cropped_fragment, center_coords)

    return ~mask_frame.astype(bool)
Example #17
    def recognize_gesture(self, image):
        """
            image : a 320x240 pixel RGB image in the form of a numpy array

            This function should locate the hand and classify the gesture.
            returns : (position, label)

            position : a tuple of (x1,y1,x2,y2) coordinates of bounding box
                x1,y1 is top left corner, x2,y2 is bottom right

            label : a single character. eg 'A' or 'B'
        """
        # print "In recognize_gesture"
        scales = [
            1.25, 1.015625, 0.78125, 0.546875, 1.5625, 1.328125, 1.09375,
            0.859375, 0.625, 1.40625, 1.171875, 0.9375, 0.703125, 1.71875,
            1.484375
        ]

        detectedBoxes = []  # [x,y,conf,scale]
        for sc in scales:
            detectedBoxes.append(
                image_pyramid_step(self.handDetector, image, scale=sc))

        # the detector window is 128 px; compute each box's side length and
        # map the box coordinates back to the original image scale
        side = [128 / sc for sc in scales]

        for i in range(len(detectedBoxes)):
            detectedBoxes[i][0] = detectedBoxes[i][0] / scales[i]  # x
            detectedBoxes[i][1] = detectedBoxes[i][1] / scales[i]  # y

        nms_lis = []  # [x1,x2,y1,y2]
        for i in range(len(detectedBoxes)):
            nms_lis.append([
                detectedBoxes[i][0], detectedBoxes[i][1],
                detectedBoxes[i][0] + side[i], detectedBoxes[i][1] + side[i],
                detectedBoxes[i][2]
            ])
        nms_lis = np.array(nms_lis)

        res = non_max_suppression_fast(nms_lis, 0.4)

        output_det = res[0]
        x_top = output_det[0]
        y_top = output_det[1]
        side = output_det[2] - output_det[0]
        position = [x_top, y_top, x_top + side, y_top + side]

        croppedImage = crop(image, x_top, x_top + side, y_top, y_top + side)
        hogvec = convertToGrayToHOG(croppedImage)

        prediction = self.signDetector.predict_proba([hogvec])[0]

        # sort classes by probability, descending (zip objects are not
        # sortable in Python 3)
        zi = sorted(zip(self.signDetector.classes_, prediction),
                    key=lambda x: x[1], reverse=True)

        # To return the top 5 predictions
        final_prediction = []
        for i in range(5):
            final_prediction.append(
                self.label_encoder.inverse_transform(zi[i][0]))
        # print position,final_prediction

        return position, final_prediction
Example #18
parser = argparse.ArgumentParser()
parser.add_argument(
    'model',
    type=str,
    help=
    'Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
    model = model_from_json(json.load(jfile))

model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)

while True:
    ret, frame = cap.read()
    key = cv2.waitKey(50)
    image_array = np.asarray(frame)
    image_array = helper.crop(image_array, 0.35, 0.1)
    image_array = helper.resize(image_array, new_dim=(64, 64))
    transformed_image_array = image_array[None, :, :, :]
    transformed_image_array = tf.cast(transformed_image_array, tf.float32)
    steering_angle = float(model.predict(transformed_image_array,
                                         batch_size=1))
    cv2.imshow("keytest", frame)

    if key == 120:  # 120 == ord('x')
        break
    # print(steering_angle)

    if steering_angle > Optimize_number:
        print("TURN RIGHT!!!")
    elif -Optimize_number < steering_angle < Optimize_number:
        print("STRAIGHT")
Example #19
    def build(self):
        print("BUILD: ", self.name)

        # with tf.variable_scope(self.name) as vs:

        fmaps = self.fmaps_in
        num_channels = self.base_channels
        across = []
        print("ACTIVATION:  ", self.activation_type)
        print("fmaps_in : ", fmaps.shape)
        for layer in range(self.num_layers):
            for conv_pass in range(self.num_conv_passes):
                fmaps = tf.layers.conv3d(
                    inputs=fmaps,
                    filters=num_channels,
                    kernel_size=self.down_kernel_size[layer],
                    padding=self.padding_type,
                    data_format="channels_first",
                    activation=self.activation_type,
                    name="%s_down_layer_%i_conv_pass_%i" %
                    (self.name, layer, conv_pass))
            across.append(fmaps)

            fmaps = helper.downsample(
                fmaps_in=fmaps,
                downsample_type=self.downsample_type,
                downsample_factors=self.resample_factors[layer],
                padding_type=self.padding_type,
                voxel_size=self.voxel_size,
                name="%s_downsample_layer_%i" % (self.name, layer))

            print("layer ", (layer + 1), ": ", fmaps.shape)
            num_channels *= self.channel_inc_factor

            if layer == self.num_layers - 1:
                for conv_pass in range(self.num_conv_passes):
                    fmaps = tf.layers.conv3d(
                        inputs=fmaps,
                        filters=num_channels,
                        kernel_size=self.down_kernel_size[layer],
                        padding=self.padding_type,
                        data_format="channels_first",
                        activation=self.activation_type,
                        name="%s_bottom_conv_pass_%i" % (self.name, conv_pass))

        print("bottom   : ", fmaps.shape)

        for layer in reversed(range(self.num_layers)):
            num_channels //= self.channel_inc_factor  # keep the filter count an integer
            fmaps = helper.upsample(
                fmaps_in=fmaps,
                num_channels=num_channels,
                upsample_type=self.upsample_type,
                upsample_factors=self.resample_factors[layer],
                activation_type=self.activation_type,
                padding_type=self.padding_type,
                voxel_size=self.voxel_size,
                name="%s_upsample_layer_%i" % (self.name, layer))

            # U-Net style skip connection: crop the stored encoder feature map
            # to the decoder's spatial size, then concatenate along channels
            cropped = helper.crop(across[layer], fmaps.get_shape().as_list())
            fmaps = tf.concat([cropped, fmaps], 1)

            for conv_pass in range(self.num_conv_passes):
                fmaps = tf.layers.conv3d(
                    inputs=fmaps,
                    filters=num_channels,
                    kernel_size=self.up_kernel_size[layer],
                    padding=self.padding_type,
                    data_format="channels_first",
                    activation=self.activation_type,
                    name="%s_up_layer_%i_conv_pass_%i" %
                    (self.name, layer, conv_pass))
            print("layer ", (layer + 1), ": ", fmaps.shape)

        print("fmaps_out: ", fmaps.shape)

        self.fmaps = fmaps
Example #20
def test_image_crop():
    helper.crop(os.path.join('sample_data',
                             'sample_circle.png'), (100, 20, 200, 100),
                os.path.join('sample_data', 'cropped.png'))
    helper.show(os.path.join('sample_data', 'cropped.png'))
Example #21
def preview_small_pores_detection_by_fragment(img2d_gray,
                                              plots=8,
                                              percentile=2,
                                              window_size=300):

    fig, axes = plt.subplots(ncols=3,
                             nrows=plots,
                             figsize=(21, 7 * plots),
                             constrained_layout=True)
    axes = axes.ravel()

    img_without_large_contours = remove_large_contours(
        img2d_gray, min_large_contour_length=3000)
    global_thresh = np.percentile(img_without_large_contours.ravel(),
                                  percentile)

    for i, _ in enumerate(axes):
        if i % 3 == 0:
            center_coords = np.asarray([
                np.random.randint(window_size // 2 + 1,
                                  img2d_gray.shape[0] - window_size // 2 - 1),
                np.random.randint(window_size // 2 + 1,
                                  img2d_gray.shape[1] - window_size // 2 - 1)
            ])
            img_2d_gray_frag = crop(img2d_gray, (window_size, window_size),
                                    center_coords)
            img_without_contours_frag = crop(img_without_large_contours,
                                             (window_size, window_size),
                                             center_coords)

            # ========================================
            axes[i].imshow(img_2d_gray_frag, cmap=plt.cm.gray)
            axes[i].set_title("original image", fontsize=25)

            # ========================================
            axes[i + 1].imshow(img_2d_gray_frag, cmap=plt.cm.gray)
            axes[i + 1].set_title("global threshold", fontsize=25)

            bin_cropped_fragment_glob = img_2d_gray_frag > np.percentile(
                img_2d_gray_frag.ravel(), percentile)
            mask_cropped_fragment_glob = np.ma.masked_where(
                bin_cropped_fragment_glob, bin_cropped_fragment_glob)
            axes[i + 1].imshow(mask_cropped_fragment_glob,
                               cmap='hsv',
                               interpolation='none')

            # ========================================
            axes[i + 2].imshow(img_2d_gray_frag, cmap=plt.cm.gray)
            img_without_contours_frag = median(img_without_contours_frag)

            min_brightness = np.min(img_without_contours_frag)
            max_brightness = np.max(img_without_contours_frag)

            local_thresh = min_brightness + (max_brightness -
                                             min_brightness) * 0.5
            axes[i + 2].set_title(
                "local-global threshold \n without borders", fontsize=25)
            if local_thresh > global_thresh:
                local_thresh = global_thresh

            bin_cropped_fragment = img_without_contours_frag > local_thresh
            mask_cropped_fragment = np.ma.masked_where(bin_cropped_fragment,
                                                       bin_cropped_fragment)
            axes[i + 2].imshow(mask_cropped_fragment,
                               cmap='hsv',
                               alpha=0.4,
                               interpolation='none')

        axes[i].axis("off")

    return fig
Example #22
                if diff > 0:
                    thisRobot.roverMovePointTurn_rel(80)
                elif diff < 0:
                    thisRobot.roverMovePointTurn_rel(-80)
                thisRobot.roverMoveForward(10)
                thisRobot.roverMoveStart()
                continue
            elif dist_route < ROUTE_WIDTH / 2:
                thisRobot.mode = VIS
            else:
                thisRobot.mode = GPS

            if thisRobot.mode == VIS:
                ret, frame = cap.read()
                image_array = np.asarray(frame)
                image_array = helper.crop(image_array, 0.5, 0.2)

                image_array = helper.resize(image_array, new_dim=(64, 64))
                HSV_image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2HSV)
                transformed_image_array = HSV_image_array[None, :, :, :]
                transformed_image_array = tf.cast(transformed_image_array, tf.float32)
                steering_angle = float(model.predict(transformed_image_array, batch_size=1))

                INT_steering = int(steering_angle * 320)
                if INT_steering > 250:
                    INT_steering = 250
                elif INT_steering < -250:
                    INT_steering = -250
                INT_steering += 250
                # =======================Serial Write===========================
                dataFormat = "<{},510,1>".format(INT_steering)
Example #23
import helper
import cv2

IMG_PATH = './Images/'
img = cv2.imread(IMG_PATH + "sample" + '.jpg')

img = helper.crop(img, 0.1, 0.2)
cv2.imwrite(IMG_PATH + 'sample_crop' + '.jpg', img)

img, angles = helper.random_flip(img, 1, flipping_prob=1)
cv2.imwrite(IMG_PATH + 'sample_flip' + '.jpg', img)

img = helper.random_gamma(img)
cv2.imwrite(IMG_PATH + 'sample_gamma' + '.jpg', img)

img, angles = helper.random_shear(img, 0.5, shear_range=100)
cv2.imwrite(IMG_PATH + 'sample_shear' + '.jpg', img)
print(angles)