Example #1
    def __getContours(self, hsv_frame, contour_frame, color):
        color_ranges = self.red_range if color == Color.RED else self.white_range
        final_mask = None
        for lower, upper in color_ranges:
            current_mask = cv2.inRange(hsv_frame, lower, upper)
            # OR the per-range masks into one combined mask
            final_mask = current_mask if final_mask is None else \
                cv2.bitwise_or(final_mask, current_mask)

        '''
        kernel = np.ones((2, 2), np.uint8)
        erosion = cv2.erode(final_mask, kernel, iterations=1)
        #dilation = cv2.dilate(erosion, kernel, iterations=1)
        utils.imshow('erosion', erosion)
        #utils.imshow('dilation of erosion', dilation)
        '''
        # OpenCV 3.x signature; see the version note below
        _, contours, _ = cv2.findContours(final_mask, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

        if self.debug:
            cv2.drawContours(contour_frame, contours, -1, (0, 255, 0), 2)
            utils.imshow('contours', contour_frame)

        return contours
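
Note: the three-value unpacking of cv2.findContours above (and in several later examples) is the OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy). A minimal version-agnostic wrapper, as a sketch (the helper name is ours, not from any of these repos):

def find_contours_compat(mask, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE):
    ret = cv2.findContours(mask, mode, method)
    # OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
    return ret[0] if len(ret) == 2 else ret[1]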
Example #2
def improve_subpix(th, points, debug=None):

    if debug:
        s = 1
        w, h = th.shape[1], th.shape[0]
        vis = cv2.resize(th, (s * w, s * h))
        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        draw_points(vis,
                    s * points,
                    colors=(0, 0, 255),
                    ret=vis,
                    rad=1,
                    thick=1)

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
    corners = np.float32(points).reshape(-1, 1, 2)
    corners = cv2.cornerSubPix(th, corners, (5, 5), (-1, -1), criteria)
    points = corners.reshape(-1, 2)

    if dbg(debug, 'improve_subpix'):
        draw_points(vis,
                    s * points,
                    colors=(0, 255, 0),
                    ret=vis,
                    rad=1,
                    thick=1)
        utils.imshow('subpix', vis)
        cv2.waitKey()

    return points
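
For reference, cv2.cornerSubPix expects a single-channel image and float32 points shaped (N, 1, 2), as used above. A minimal standalone sketch (the file name and point coordinates are hypothetical):

import cv2
import numpy as np

gray = cv2.imread('board.png', cv2.IMREAD_GRAYSCALE)  # assumed grayscale input
pts = np.float32([[120.0, 84.0], [240.0, 86.0]]).reshape(-1, 1, 2)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
# refine each point inside an 11x11 search window; (-1, -1) disables the dead zone
refined = cv2.cornerSubPix(gray, pts, (5, 5), (-1, -1), criteria).reshape(-1, 2)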
Example #3
    def detectTable(self, im, DEBUG=False):
        self.table.color = np.array(getTableColor(im, self.table.cardSet))
        COLOR_RANGE = [
            self.table.color - self.table.color * 0.6,
            self.table.color + self.table.color * 0.9
        ]
        color_mask = cv2.inRange(im, COLOR_RANGE[0], COLOR_RANGE[1])

        _, contours, hierarchy = cv2.findContours(color_mask,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
        cnt = sorted(contours, key=cv2.contourArea, reverse=True)[0]
        [x_o, y_o, w_o, h_o] = cv2.boundingRect(cnt)

        self.table.outer_perimeter = [x_o, y_o, w_o, h_o]
        [x_i, y_i, w_i,
         h_i] = [x_o + 0.15 * w_o, y_o + 0.25 * h_o, w_o * 0.7, h_o * 0.5]
        self.table.inner_perimeter = [x_i, y_i, w_i, h_i]

        if DEBUG:
            mock = im.copy()
            # utils.imshow(color_mask, 0.8)
            cv2.rectangle(mock, (int(x_o), int(y_o)),
                          (int(x_o) + int(w_o), int(y_o) + int(h_o)),
                          (0, 255, 0), 2)
            cv2.rectangle(mock, (int(x_i), int(y_i)),
                          (int(x_i) + int(w_i), int(y_i) + int(h_i)),
                          (0, 255, 0), 2)
            utils.imshow(mock, 0.8)
        return
Example #4
def evaluate_model(model, num_images):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    actuals, probabilities = [], []
    test_classes_to_idx = classes_to_idx['test']
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['test']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            sm = torch.nn.Softmax(dim=1)  # dim=1: normalize over the class dimension
            probabilities = sm(outputs)
            # converted to probabilities
            labels = labels.detach().cpu().numpy()
            for j in range(inputs.size()[0]):
                images_so_far += 1
                probability = probabilities[j][preds[j]]
                label = test_classes_to_idx[labels[j]]
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}, ({:.2f}), actual: {}'.format(
                    class_names[preds[j]], probability, label))
                imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
Example #5
    def run(self, show=False, resize_factor=0.2):
        self.all_img_points = []
        self.files = []
        found = 0
        for imgpath in self.imgpaths:
            img = imread(imgpath)
            img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            self.imgsize = img_gray.shape
            img_resized = cv2.resize(img,
                                     None,
                                     fx=resize_factor,
                                     fy=resize_factor)
            ret, corners = cv2.findChessboardCorners(img_gray,
                                                     self.chessboard_shape,
                                                     None)
            if ret:
                print('{}: corners found.'.format(imgpath))
                corners_subpix = cv2.cornerSubPix(img_gray, corners, (11, 11),
                                                  (-1, -1), self.criteria)
                if show:
                    cv2.drawChessboardCorners(img_resized,
                                              self.chessboard_shape,
                                              corners_subpix * resize_factor,
                                              ret)
                    imshow(img_resized, title=imgpath)
                self.all_img_points.append(corners_subpix)
                self.files.append(os.path.split(imgpath)[1])
                found += 1
            else:
                print('{}: corners not found.'.format(imgpath))

        self.all_world_points = [self.world_pts] * len(self.all_img_points)
        self._calibrate()
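
The _calibrate call is not shown. A plausible sketch under standard OpenCV usage (the camera_matrix/dist_coeffs attribute names are assumptions, and self.world_pts is assumed to be float32):

    def _calibrate(self):
        h, w = self.imgsize  # shape is (rows, cols); calibrateCamera wants (width, height)
        rms, self.camera_matrix, self.dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
            self.all_world_points, self.all_img_points, (w, h), None, None)
        print('RMS reprojection error: {:.4f}'.format(rms))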
Example #6
    def run(self, hr_img, lr_img):
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())
        print('run: ->', hr_img.shape)
        # shape = np.zeros(hr_img.shape)
        # err_ = []
        # print(shape)
        for er in range(self.epoch):
            # image = tf.reshape(image,[image.shape[0],image.shape[1]])
            _, x = self.sess.run([self.train_op, self.loss],
                                 feed_dict={
                                     self.images: lr_img,
                                     self.label: hr_img
                                 })

            print(x)

        result = self.pred.eval({self.images: lr_img})
        result = result * 255 / (1e3 * 1e-5)
        imshow_spectrum(self.sess.run(tf.fft2d(result)))
        # plt_imshow(result)
        # result = np.clip(result, 0.0, 255.0).astype(np.uint8)
        result = np.abs(result).astype(np.uint8)
        imshow(result)
        plt_imshow(result)
        lr = self.sess.run([self.images],
                           feed_dict={
                               self.images: lr_img,
                               self.label: hr_img
                           })
        print(result + (np.asarray(lr) * 255 / (1e3 * 1e-5)))
        plt_imshow(result + (np.asarray(np.squeeze(lr)) * 255 / (1e3 * 1e-5)))

        return result
Example #7
def visualize_model(model, num_images=6):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                utils.imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
Example #8
def test(args):
    device = torch.device("cuda" if args.cuda else "cpu")
    test_ds, test_dl, classes_num = getDataLoader(args)
    model = SimpleConvNet(classes_num, args.image_size).to(device)
    model.load_state_dict(torch.load(args.model))
    model.eval()
    accuracy = 0.0
    num_images = 9

    with torch.no_grad():
        for images, labels in test_dl:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            _, preds = torch.max(outputs, 1)

            accuracy += torch.sum(preds == labels).item()

    # note: this visualizes predictions from the last evaluated batch only
    for j in range(num_images):
        ax = plt.subplot(num_images // 3, 3, j + 1)
        ax.axis('off')
        ax.set_title(f'predicted: {test_ds.classes[preds[j]]}')
        imshow(images[j].cpu())
    plt.show()
    accuracy /= len(test_ds)
    print(
        f"Accuracy of the network on the test dataset is {100 * accuracy:.4f}%")
Example #9
def detect_marker_video(dictionary,
                        video_path,
                        isShow=True,
                        isSave=True,
                        savename=None,
                        savedirpath=None):
    """Detects and shows detected board in the video."""

    cap = cv2.VideoCapture(video_path)
    cnt = 0
    while cap.isOpened():
        cnt += 1
        ret, frame = cap.read()
        if not ret:
            break
        corners, ids, _ = aruco.detectMarkers(frame, dictionary)
        if ids is None:
            continue
        aruco.drawDetectedMarkers(frame, corners, ids, (0, 255, 0))
        if isSave:
            if savename is None or savedirpath is None:
                print("Error: Please specify save marker path.")
                return -1
            saveimg_path = osp.join(savedirpath,
                                    str(savename) + '_' + str(cnt) + '.png')
            cv2.imwrite(saveimg_path, frame)
        if isShow:
            utils.imshow(img=frame, wsec=10, width=1000)

    cap.release()
    cv2.destroyAllWindows()
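
For comparison, the same detection applied to a single image instead of a video; an illustrative sketch that uses only the calls appearing above (the function name is ours, not from the original repo):

def detect_marker_image(dictionary, img_path):
    """Illustrative single-image variant (not from the original repo)."""
    frame = cv2.imread(img_path)
    corners, ids, _ = aruco.detectMarkers(frame, dictionary)
    if ids is not None:
        aruco.drawDetectedMarkers(frame, corners, ids, (0, 255, 0))
    return frame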
Example #10
def look(direction, image, cardSet, x1, DEBUG=False, VERBOSE=False):
    x2 = x1 + cardSet.width_mode
    if direction == -1:
        new_x1 = x1 - cardSet.distance_mode
    elif direction == 1:
        new_x1 = x1 + cardSet.distance_mode
    elif direction == 0:
        new_x1 = x1
    else:
        raise Exception("detectTableCards/look argument error")

    if VERBOSE: print("Looking: " + str(direction))

    new_x2 = new_x1 + cardSet.width_mode
    y1 = cardSet.y
    y2 = y1 + cardSet.height_mode

    card_im = image[y1 - 10:y2 + 10, new_x1 - 10:new_x2 + 10]
    if DEBUG: utils.imshow(card_im, 2)
    ret = identifyCards(card_im, DEBUG)  # list of (value, suit) tuples
    if ret:
        value, suit = ret[0]
        if VERBOSE:
            print("\tValue: " + str(value))
            print("\tSuit: " + str(suit))
        card = Card(value, suit, [[new_x1, y1], [new_x2, y2]])
        if direction == 0:
            return cardSet.add(card)
        else:
            # propagate VERBOSE through the recursion as well
            return look(direction, image, cardSet.add(card), new_x1, DEBUG, VERBOSE)
    else:
        return cardSet
Example #11
    def info(self, use_logging=True, log_dir=None):
        if use_logging:
            logger.info('- Training-img set:\t{}'.format(
                self.train_imgs.shape))
            logger.info('- Training-label set:\t{}'.format(
                self.train_labels.shape))
            logger.info('- Training-wmap set:\t{}'.format(
                self.train_wmaps.shape))
            logger.info('- Test-img set:\t\t{}'.format(self.test_imgs.shape))

            logger.info('- image shape:\t\t{}'.format(self.img_shape))
        else:
            print('- Training-img set:\t{}'.format(self.train_imgs.shape))
            print('- Training-label set:\t{}'.format(self.train_labels.shape))
            print('- Training-wmap set:\t{}'.format(self.train_wmaps.shape))
            print('- Test-img set:\t\t{}'.format(self.test_imgs.shape))
            print('- image shape:\t\t{}'.format(self.img_shape))

        print(
            ' [*] Saving data augmented images to check U-Net fundamentals...')
        for idx in range(self.num_train):
            img_, label_, wmap_ = self.train_imgs[idx], self.train_labels[
                idx], self.train_wmaps[idx]
            utils.imshow(img_, label_, wmap_, idx, log_dir=log_dir)
            utils.test_augmentation(img_, label_, wmap_, idx, log_dir=log_dir)
            utils.test_cropping(img_,
                                label_,
                                wmap_,
                                idx,
                                self.input_size,
                                self.output_size,
                                log_dir=log_dir)
        print(' [!] Saving data augmented images to check U-Net fundamentals!')
Example #12
def walk_contours_by_kbd(contours,
                         base_img,
                         color=(0, 0, 255),
                         thickness=1,
                         draw_measurements=True,
                         draw_visited=False,
                         on_next_contour=None):
    if draw_visited:
        img = base_img.copy()
    for contour in contours:
        if not draw_visited:
            img = base_img.copy()
        contour.draw(img,
                     color,
                     thickness=thickness,
                     draw_measurements=draw_measurements)

        on_next_contour and on_next_contour(contour)

        utils.imshow(contour=img)
        if cv2.waitKey() == 27:
            return

    if len(contours):
        while cv2.waitKey() != 27:
            pass
Example #13
    def sample(self, show=True):
        self.build()

        input_rgb = next(self.sample_generator)

        # OLD : feed_dic = {self.input_rgb: input_rgb}
        feed_dic = {
            self.input_rgb: input_rgb[:, :, :, 0:3],
            self.input_rgb_prev: input_rgb[:, :, :, 3:6]
        }

        step, rate = self.sess.run([self.global_step, self.learning_rate])
        fake_image, input_gray = self.sess.run([self.sampler, self.input_gray],
                                               feed_dict=feed_dic)
        fake_image = postprocess(tf.convert_to_tensor(fake_image),
                                 colorspace_in=self.options.color_space,
                                 colorspace_out=COLORSPACE_RGB)

        # OLD : img = stitch_images(input_gray, input_rgb, fake_image.eval())
        img = stitch_images(input_gray, input_rgb[:, :, :, 3:6],
                            fake_image.eval())

        if not os.path.exists(self.samples_dir):
            os.makedirs(self.samples_dir)

        sample = self.options.dataset + "_" + str(step).zfill(5) + ".png"

        if show:
            imshow(np.array(img), self.name)
        else:
            print('\nsaving sample ' + sample + ' - learning rate: ' +
                  str(rate))
            img.save(os.path.join(self.samples_dir, sample))
Example #14
def estimate_marker_pose_image(
        dictionary,
        marker_length,
        img_path,
        camera_matrix,
        dist_coeffs,
        isShow=True,
        isSave=True,
        savename=None,
        savedirpath=None):
    """Reads an image and saves and/or shows the result images."""

    frame = cv2.imread(img_path)
    frame = pose_esitmation(
        frame, dictionary, marker_length, camera_matrix, dist_coeffs)
    if frame is None:
        return
    if isSave:
        if savename is None or savedirpath is None:
            print("Error: Please specify save marker path.")
            return -1
        saveimg_path = osp.join(
            savedirpath, str(savename)+'.png')
        cv2.imwrite(saveimg_path, frame)
    if isShow:
        utils.imshow(img=frame, width=1000)
    cv2.destroyAllWindows()
Example #15
def show(imagesWithSharpness):
    for i, (imagePath, sharpness) in enumerate(imagesWithSharpness):
        img = cv2.imread(imagePath)
        img = fit_image_to_shape(img, (1024, 1024))
        info = i, sharpness, os.path.basename(imagePath)
        imshow(img=(img, info))
        if cv2.waitKey() == 27:
            break
Example #16
    def __getHSV(self, frame):
        frame_removedbumpers = cv2.bitwise_and(frame, frame, mask=self.mask)
        # despite the name, this is a bilateral (edge-preserving) filter, not a Gaussian blur
        gaussblur = cv2.bilateralFilter(frame_removedbumpers, 9, 75, 75)
        hsv = cv2.cvtColor(gaussblur, cv2.COLOR_BGR2HSV)
        if self.debug:
            utils.imshow('hsv_blurred_frame', hsv)
            utils.imshow('frame_removed_bumpers', frame_removedbumpers)
        return hsv
Example #17
    def forward_once(self, x):
        self.cnt += 1
        output = self.cnn1(x)
        draw_np = output.cpu().detach().reshape([8, 1, 100, 100])
        imshow(torchvision.utils.make_grid(draw_np),
               name='output/pic{}'.format(self.cnt))
        output = output.view(output.size()[0], -1)
        output = self.fc1(output)
        return output
Example #18
    def sample_image(num_samples=16):
        noise = np.random.normal(size=(num_samples * NUM_CLASSES,
                                       *NOISE_INPUT_SHAPE))
        label = np.zeros(shape=(num_samples * NUM_CLASSES, NUM_CLASSES),
                         dtype=np.float32)
        for i in range(NUM_CLASSES):
            label[np.arange(i * num_samples, (i + 1) * num_samples), i] = 1.
        sampled = g.predict([noise, label])
        imshow(sampled, color=True)
Example #19
def valid():
    print("Loading checkpoint...")
    autoencoder.load_state_dict(torch.load("./state_dicts/autoencoder.pkl"))
    dataiter = iter(testloader)
    images, labels = next(dataiter)  # .next() was removed in Python 3
    images = get_torch_vars(images)
    decoded_imgs = autoencoder(images)[0]

    imshow(torchvision.utils.make_grid(images))
    imshow(torchvision.utils.make_grid(decoded_imgs.data))
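
Several of the PyTorch examples call an imshow helper on a make_grid tensor without defining it. The canonical tutorial version (an assumption about what these helpers wrap, valid for images normalized with mean/std 0.5) is:

import numpy as np
import matplotlib.pyplot as plt

def imshow(img):
    img = img / 2 + 0.5  # undo Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))  # CHW -> HWC for matplotlib
    plt.show()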
Example #20
def identifyCards(image, DEBUG=False):
    image_area = image.shape[0] * image.shape[1]
    COLOR_RANGE = [175, 255]  # TODO: pending review
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    color_mask = cv2.inRange(gray, COLOR_RANGE[0], COLOR_RANGE[1])
    m = np.mean(color_mask)
    if m < 100:
        if DEBUG: print("Not a card")
        return None

    _, contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
    cards = []
    min_w = image_area / 1600
    min_h = image_area / 800
    while contours:
        contour1 = contours.pop()
        [x1, y1, w1, h1] = cv2.boundingRect(contour1)
        if w1 > min_w and h1 > min_h:
            for contour2 in contours:
                [x2, y2, w2, h2] = cv2.boundingRect(contour2)
                if w2 > min_w and h2 > min_h:
                    similar_x = x1 - x1 * 0.7 < x2 < x1 + x1 * 0.7
                    similar_w = w1 - w1 * 1.3 < w2 < w1 + w1 * 1.3
                    similar_h = h1 - h1 * 0.5 < h2 < h1 + h1 * 0.5
                    y_condition = y1 + h1 <= y2 < y1 + 1.5 * h1
                    if all([similar_x, similar_w, similar_h, y_condition]):
                        value_im = cv2.resize(image[y1:y1 + h1, x1:x1 + w1, 0],
                                              (0, 0),
                                              fx=4,
                                              fy=4)
                        value = obtain_value(value_im)

                        suit_im = image[y2:y2 + h2, x2:x2 + w2, :]
                        suit = obtain_suit(suit_im, contour2)

                        if DEBUG:
                            cv2.rectangle(image, (x1, y1), (x1 + w1, y1 + h1),
                                          (255, 0, 255), 1)
                            cv2.rectangle(image, (x2, y2), (x2 + w2, y2 + h2),
                                          (255, 0, 255), 2)

                        cards.append((value, suit))
                        if len(cards) == 2:
                            if DEBUG:
                                utils.imshow(image, 2)
                                print(cards)
                            return cards

    if DEBUG:
        utils.imshow(image, 2)
        print(cards)

    return cards
Example #21
def walk_cv_contours_by_kbd(polylines, base_img):
    for poly in polylines:
        img = base_img.copy()
        cv2.polylines(img, [poly], False, utils.random_color(), thickness=2)
        utils.imshow(dd=img)
        if cv2.waitKey() == 27:
            return

    if len(polylines):
        while cv2.waitKey() != 27:
            pass
Example #22
def main():
    imreadMode = 'opencv'
    readFns = dict(opencv=cvImread, pil=pilImread)
    imgFiles = sorted(
        glob.glob('/home/trevol/hdd/Datasets/counters/2_from_phone/00*.jpg'))
    for imgFile in cycle(imgFiles):
        img = readFns[imreadMode](imgFile)
        print(img.shape)
        imshow(img=(fit_image_to_shape(img, (1024, 1024)),
                    [imreadMode, imgFile]))
        if cv2.waitKey() == 27:
            break
Example #23
def main():
    # downloading the dataset
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset = torchvision.datasets.CIFAR10(root="/data",
                                            train=True,
                                            download=True,
                                            transform=transform)

    testset = torchvision.datasets.CIFAR10(root="/data",
                                           train=False,
                                           download=True,
                                           transform=transform)

    train_set, train_set_label, validation_set, validation_set_label = ut.train_val_split(
        trainset)
    flat_test, labels = ut.process_test_set(testset)

    print("The splits are: ")
    print(train_set.shape, train_set_label.shape)
    print(validation_set.shape)

    #training
    myNN = NeuralNetwork(OUTPUTS, IMAGESIZE, BATCHSIZE, LEARNINGRATE, LAYERS)

    # # starting the training
    myNN.train(train_set, train_set_label, validation_set,
               validation_set_label, ITERATIONS)
    myNN.save()
    myNN.clean()

    # testing
    myNNtest = NeuralNetwork(OUTPUTS, IMAGESIZE, BATCHSIZE, LEARNINGRATE,
                             LAYERS)
    myNNtest.load(wFile, bFile)
    print("validation accuracy for current",
          myNNtest.check(validation_set, validation_set_label))
    print("train accuracy for current",
          myNNtest.check(train_set, train_set_label))

    images, label, predictions = myNNtest.pred(flat_test.T, labels)

    classes = [
        'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',
        'truck'
    ]
    for x in range(10):
        ut.imshow(testset[x][0])
        print("Ground Truth: ", label[x], classes[label[x]])
        print("prediction: ", predictions[x], classes[predictions[x]])
Example #24
    def drawShots(self, draw_frame, parameters):
        color_dict = {
            Color.RED: (0, 0, 255),
            Color.WHITE: (255, 255, 255)
        }
        for shot in self.shot_summaries:
            #cv2.putText(table_frame,'x',getIntTuple(scoring_position), cv2.FONT_HERSHEY_SIMPLEX, .7,color_dict[getColor(ball_id,all_balls)],2,cv2.LINE_AA)
            if shot.was_scored:
                draw_frame = cv2.circle(draw_frame, utils.getIntTuple(shot.starting_point),
                                        parameters.drawn_circle_radius, color_dict[shot.ball.color], 2)
            else:
                # cv2.circle needs an integer radius; "/" would yield a float in Python 3
                draw_frame = cv2.circle(draw_frame, utils.getIntTuple(shot.starting_point),
                                        parameters.drawn_circle_radius // 3, color_dict[shot.ball.color], 2)
        utils.imshow('shot chart', draw_frame)
        cv2.waitKey(0)
Example #25
def getTableColor(image, cardSet, DEBUG=False):
    COLOR_RANGE = [150, 255]  # color of a card
    x1 = cardSet.x[0] - cardSet.width_mode
    y1 = cardSet.y - cardSet.width_mode
    x2 = cardSet.x[0] + 2 * cardSet.width_mode
    y2 = cardSet.y

    table_sample = image[y1:y2, x1:x2, :]
    if DEBUG: utils.imshow(table_sample, 2)
    b = int(stats.mode(table_sample[:, :, 0], axis=None)[0])
    g = int(stats.mode(table_sample[:, :, 1], axis=None)[0])
    r = int(stats.mode(table_sample[:, :, 2], axis=None)[0])
    return [b, g, r]
Example #26
def getWhite(image, cardSet, DEBUG=False):
    COLOR_RANGE = [150, 255]  # color of a card
    x1 = cardSet.x[0]
    y1 = cardSet.y
    x2 = x1 + cardSet.width_mode
    y2 = y1 + cardSet.height_mode
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    color_mask = cv2.inRange(gray, COLOR_RANGE[0], COLOR_RANGE[1])
    masked_im = cv2.bitwise_and(gray, gray, mask=color_mask)
    card = masked_im[y1:y2, x1:x2]
    if DEBUG: utils.imshow(card, 3)
    color = np.median(card[card > COLOR_RANGE[0]])
    return color
Example #27
def extract_layer(image):
    assert len(image.shape) == 3, 'Require RGB image'
    line_art_value = get_line_art_value(image)
    h, w, _ = image.shape
    mask = np.ones((h, w)) * -1

    # Encode the three BGR channels into one integer per pixel
    b, g, r = cv2.split(image)  # cv2.split on a BGR image yields (b, g, r)
    b, g, r = b.astype(np.uint64), g.astype(np.uint64), r.astype(np.uint64)
    processed_image = np.array(b + 300 * (g + 1) + 300 * 300 * (r + 1))

    uniques = np.unique(processed_image)

    index = 0
    result = {}

    for unique in uniques:
        # Get coords by color
        if unique != line_art_value:
            continue

        rows, cols = np.where(processed_image == unique)
        image_tmp = np.zeros_like(processed_image)
        image_tmp[rows, cols] = 255
        imshow(image_tmp)

        # Get components
        labels = measure.label(image_tmp, connectivity=1, background=0)

        for region in measure.regionprops(labels,
                                          intensity_image=processed_image):
            if region['area'] <= 10:
                continue

            result[index] = {
                "centroid": np.array(region.centroid),
                "area": region.area,
                "image": region.image.astype(np.uint8) * 255,
                "label": index + 1,
                "coords": region.coords,
                "bbox": region.bbox,
                "min_intensity": region.min_intensity,
                "mean_intensity": region.mean_intensity,
                "max_intensity": region.max_intensity,
                'orientation': region.orientation
            }
            mask[region['coords'][:, 0], region['coords'][:, 1]] = index
            index += 1

    return result, mask
Example #28
    def show_samples(self):
        # get some random training images
        dataiter = iter(self.train_loader)
        images, labels = next(dataiter)  # .next() was removed in Python 3
        # pick the first image of each class
        index = []
        for i in range(len(self.classes)):
            for j in range(len(labels)):
                if labels[j] == i:
                    index.append(j)
                    break
        imshow(
            torchvision.utils.make_grid(images[index],
                                        nrow=len(self.classes),
                                        scale_each=True), str(self.classes))
Example #29
    def generate_from_zs(self, zs, truncation_psi=0.5):
        Gs_kwargs = dnnlib.EasyDict()
        Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
        Gs_kwargs.randomize_noise = False
        if not isinstance(truncation_psi, list):
            truncation_psi = [truncation_psi] * len(zs)

        for z_idx, z in log_progress(enumerate(zs), size=len(zs), name="Generating images"):
            Gs_kwargs.truncation_psi = truncation_psi[z_idx]
            noise_rnd = np.random.RandomState(1)  # fix noise
            tflib.set_vars({var: noise_rnd.randn(*var.shape.as_list()) for var in self.noise_vars})  # [height, width]
            images = self.Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
            img = PIL.Image.fromarray(images[0], 'RGB')
            imshow(img)
Example #30
def process_bb(frame, tids, bbs_normed, bbs_normed_all, bbs_draw):
    # classify tids
    set_tid = set(bbs_normed_all.keys())
    set_tid_prev = set(bbs_normed.keys())
    tid_inter = list(set_tid & set_tid_prev)  # intersected tids
    tid_new = list(set_tid - set_tid_prev)  # new tids
    tid_disapp = list(set_tid_prev - set_tid)  # disappeared tids

    # update existing bbs
    for tid in tid_inter:
        bbs_normed[tid] = bbs_normed_all[tid]

    # add new bbs
    for tid in tid_new:
        if len(tids) < O:
            oids_free = list(set(range(1, O + 1)) - set(tids.keys()))
            tids[oids_free[0]] = tid
            bbs_normed[tid] = bbs_normed_all[tid]

    # delete disappeared bbs
    tids = {k: v for k, v in tids.items() if v not in tid_disapp}
    bbs_normed = {k: v for k, v in bbs_normed.items() if k not in tid_disapp}

    # get a bb_normed matrix, where the 1st column denotes the bb existence
    bb_mat = np.zeros((O, 5), 'float')
    for oid in range(1, O + 1):
        if oid in tids.keys():  # exist
            tid = tids[oid]
            bb_mat[oid - 1, 0] = 1
            bb_mat[oid - 1, 1:5] = bbs_normed[tid]
        else:  # not exist
            bb_mat[oid - 1] = 0

    # draw bb masks
    bb_mask = np.zeros((1080, 1920), 'uint8')
    for bb_draw in bbs_draw:
        cv.fillConvexPoly(bb_mask, bb_draw, 255)

    # visualize & save
    if arg.v == 1:
        fg_mask = cv.imread(path.join(fg_mask_dir, str(frame) + '.jpg'), 0)
        fg_mask_vis = fg_mask // 2 + bb_mask // 2
        print(bb_mat)
        utils.imshow(bb_mask, hv, wv, 'bb_mask')
        utils.imshow(fg_mask_vis, hv, wv, 'fg_mask_vis', 1)
    else:
        np.save(path.join(bb_dir, str(frame)), bb_mat)
        cv.imwrite(path.join(bb_mask_dir, str(frame) + '.jpg'), bb_mask)

    return tids, bbs_normed
Example #31
def quantize(img, num_bins, debug=False):
	log_level = logging.DEBUG if debug else logging.INFO
	print("log level: {0}".format(log_level))
	logging.basicConfig(level = log_level)
	logging.info("quantizing image")

	img1 = img.copy()
	logging.debug(img1.dtype)
	# typecast
	# have to figure out the right class to typecast to
	#
	# divide by range to find bin values
	#range_bin_rgb = 1.0/num_bins
	logging.debug("min max: {0}".format(utils.min_max(img1)))
	np.multiply(img1, num_bins, out=img1)
	# get floored
	np.floor(img1, out=img1)
	
	logging.debug("min max: {0}".format(utils.min_max(img1)))
	#
	# convert to num_bins-base
	operator = np.array([num_bins**0,num_bins**1,num_bins**2])
	trash = np.tile(operator,(img1.shape[0],img1.shape[1],1))
	logging.debug(img1[:2,:2,:])	
	np.multiply(img1,trash,out=img1)
	logging.debug(img1[:2,:2,:])
	logging.debug("min max: {0}".format(utils.min_max(img1)))

	# sum across the 3rd dim to find bin values
	img2 = np.sum(img1,2)
	#print(img1.shape)
	logging.debug("img2 shape: {0}".format(img2.shape))
	logging.debug(img2[:2,:2])
	logging.debug("min max: {0}".format(utils.min_max(img2)))


	#plt.imshow(img2/(num_bins_rgb**3-1.0),cmap=plt.cm.jet)
	#plt.imshow(img2,cmap=plt.cm.jet)
	utils.imshow(img2, colorbar = True, cmap=plt.cm.jet, log_level = log_level)
	#plt.colorbar()
	#plt.show()
	utils.imshow(img, log_level = log_level)
	#plt.show()

	# map uniq labels
	img2_uniq = np.sort(np.unique(img2))
	logging.debug("expected range = [0, {0}]".format(num_bins**3-1))
	logging.debug("uniq values: {0}".format(img2_uniq))
	#img3 = np.put(img2, )
	return img2
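
The tile/multiply/sum dance above encodes each pixel's (b, g, r) bin triple as a single base-num_bins integer. A compact equivalent, as a sketch (assuming img is float in [0, 1), like the original):

def quantize_compact(img, num_bins):
    bins = np.floor(img * num_bins)  # per-channel bin index in [0, num_bins)
    # weight the channels by num_bins**0, num_bins**1, num_bins**2 and sum
    return bins[..., 0] + bins[..., 1] * num_bins + bins[..., 2] * num_bins ** 2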
Example #32
    def get_test_images(self, n, transforms, showimage=True):
        img = []
        for t in transforms:
            ds = TransformDS(self.testset, t)
            dl = DataLoader(ds, batch_size=n, num_workers=0)
            # grab only the first batch of test images for this transform
            for i, data in enumerate(dl):
                x, y = data[0].to(self.device), data[1].to(self.device)
                img.append(x[:n, :, :, :])
                break
        if showimage:
            x = torch.cat(img)
            utils.imshow(torchvision.utils.make_grid(x, n))
        return img
Example #33
def debug_images_show(train_loader_for_classification):
    ##################################################################
    #
    # Images show for debug
    #
    ##################################################################
    # get some random training images
    dataiter = iter(train_loader_for_classification)
    images, labels = next(dataiter)  # .next() was removed in Python 3
    # show images
    print("images.shape ", images.shape)
    utils.imshow(torchvision.utils.make_grid(
        images))  # images = Tensor of shape (B x C x H x W)
    # print labels
    print(' '.join('%5s' % labels[j] for j in range(params.batch_size)))
Example #34
def main():
    # image = cv2.imread("../counter_images/01305.png", cv2.IMREAD_GRAYSCALE)
    image = cv2.imread("../counter_images/01305.png")
    image = cv2.blur(image, (5, 5))
    # image = 255 - image

    # image = image[59:201, 103:187]
    saliency = cv2.saliency_StaticSaliencyFineGrained.create()
    ret, salImage = saliency.computeSaliency(image)

    salImage = np.uint8(salImage * 255 / salImage.max())

    imshow(image, salImage)

    cv2.waitKey()
Example #35
    def __findBalllikeContoursPositions(self, frame_circles, contours):
        good_circles = []
        for i, cont in enumerate(contours):
            area = cv2.contourArea(cont)
            if area < self.min_area or area > self.max_area:
                continue
            (x, y), r = cv2.minEnclosingCircle(cont)
            circle = Circle(x, y, r)
            if circle.r > self.max_ball_radius:
                continue
            # a disc that fills its min enclosing circle has area ~ pi * r**2,
            # so this ratio rejects elongated or hollow blobs
            if area / circle.r**2 < self.min_area_to_radius_sqaured_ratio:
                continue

            if self.debug:
                cv2.circle(frame_circles, (int(x), int(y)), self.drawn_circle_radius, (0, 255, 0), 2)
                utils.imshow('contour circles', frame_circles)
            good_circles.append(circle)
        return good_circles
Example #36
def main(img_path, image=None, mask_path_prefix=None):
    global img, mode
    if image is None:  # "image == None" is ambiguous for numpy arrays; use "is None"
        img = cv2.imread(img_path)
    else:
        img = image
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", draw_circle)

    while True:
        utils.imshow("image", img)
        k = cv2.waitKey(1) & 0xFF
        if k == ord("m"):
            mode += 1
        if k == ord("d"):
            mask = make_mask(img)
            if mask_path_prefix is not None:
                cv2.imwrite("masks/" + mask_path_prefix + "-mask.png", mask)
            cv2.destroyAllWindows()
            return mask
        elif k == 27:
            break
    cv2.setMouseCallback("image", nothing)
    cv2.destroyAllWindows()
Example #37
def main(argv):
    img_path = argv[0]
    img = cv2.imread(img_path)
    cv2.namedWindow('bars',cv2.WINDOW_NORMAL)
    cv2.createTrackbar('H','bars',0,255,nothing)
    cv2.createTrackbar('S','bars',0,255,nothing)
    cv2.createTrackbar('V','bars',0,255,nothing)
    cv2.createTrackbar('H2','bars',255,255,nothing)
    cv2.createTrackbar('S2','bars',255,255,nothing)
    cv2.createTrackbar('V2','bars',255,255,nothing)
    while True:
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

        h = cv2.getTrackbarPos('H','bars')
        s = cv2.getTrackbarPos('S','bars')
        v = cv2.getTrackbarPos('V','bars')
        h2 = cv2.getTrackbarPos('H2','bars')
        s2 = cv2.getTrackbarPos('S2','bars')
        v2 = cv2.getTrackbarPos('V2','bars')
        lower = np.array([h,s,v])
        upper = np.array([h2,s2,v2])
        
        
        #hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
        gaussblur = cv2.bilateralFilter(img,9,75,75)
        hsv = cv2.cvtColor(gaussblur, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower, upper)
        utils.imshow('hsv',hsv)
        utils.imshow('mask',mask)
        res = cv2.bitwise_and(img,img,mask=mask)
        utils.imshow('bars',res)
        

    cv2.destroyAllWindows()