Example #1
import time

import numpy as np
import turicreate as tc
from turicreate import SFrame


def similar_sort(sourceData, classicData, num):
    """
    :param sourceData: pandas DataFrame of candidate images; must have a 'path' column
    :param classicData: pandas DataFrame of reference ("classic") images; must have a 'path' column
    :param num: how many pictures to pick out (0 means use every candidate image)
    :return: sourceData with a 'distance' column (mean distance to the classics) added
    """
    start_time = time.time()

    ref_data = SFrame()
    for index, row in sourceData.iterrows():
        #print row
        path = row['path']
        img = tc.Image(path)
        ref_data = ref_data.append(SFrame({'path': [path], 'image': [img]}))
    ref_data = ref_data.add_row_number()

    # print ref_data

    query_data = SFrame()
    for index, row in classicData.iterrows():
        path = row['path']
        img = tc.Image(path)
        query_data = query_data.append(SFrame({
            'path': [path],
            'image': [img]
        }))
    query_data = query_data.add_row_number()

    model = tc.image_similarity.create(ref_data,
                                       label=None,
                                       feature=None,
                                       model='resnet-50',
                                       verbose=True)
    if num == 0:
        num = ref_data.num_rows()

    similar_images = model.query(query_data, k=num)

    # distance matrix indexed by (query row, reference row); missing pairs stay 0.
    # Sized by ref_data.num_rows(), not num: reference_label can be any reference
    # row index, and the column mean must match len(sourceData) below.
    ret_array = np.zeros((query_data.num_rows(), ref_data.num_rows()))
    for image in similar_images:
        ref_label = image['reference_label']
        distance = image['distance']
        query_label = image['query_label']
        ret_array[query_label][ref_label] = distance

    # average distance to the reference ("classic") images, per source image
    mean = np.mean(ret_array, axis=0)
    sourceData.insert(2, 'distance', mean)
    #sort = np.argsort(mean)
    # print sourceData

    elapsed_time = time.time() - start_time
    print("Time elapsed = %.2f s" % elapsed_time)
    return sourceData
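A minimal usage sketch (the DataFrame layout and file names below are illustrative assumptions; both frames need a 'path' column):

import pandas as pd

source_df = pd.DataFrame({'name': ['a', 'b'],
                          'path': ['imgs/a.jpg', 'imgs/b.jpg']})
classic_df = pd.DataFrame({'name': ['ref'], 'path': ['imgs/ref.jpg']})

# num=0 keeps a distance for every source image
ranked = similar_sort(source_df, classic_df, num=0)
print(ranked.sort_values('distance'))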
Example #2
 def from_pil_image(pil_img, image_format='png'):
     if image_format == 'raw':
         image = np.array(pil_img)
         FORMAT_RAW = 2
         return tc.Image(_image_data=image.tobytes(),
                         _width=image.shape[1],
                         _height=image.shape[0],
                         _channels=image.shape[2],
                         _format_enum=FORMAT_RAW,
                         _image_data_size=image.size)
     else:
         with tempfile.NamedTemporaryFile(mode='w+b', suffix='.' + image_format) as f:
             pil_img.save(f, format=image_format)
             return tc.Image(f.name)
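A usage sketch for this helper (the file name is a placeholder; numpy is assumed to be imported as np):

from PIL import Image as PILImage

pil = PILImage.open('photo.jpg').convert('RGB')
png_img = from_pil_image(pil)           # round-trips through a temporary PNG
raw_img = from_pil_image(pil, 'raw')    # packs the pixel buffer directly
print(png_img.width, png_img.height)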
Example #3
def verify(uuid, img_uri):
    filename = "{}/verify/{}.png".format(sys.path[0], uuid)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    #mono_uri = flatten.monochrome_img(img_uri)
    #uri = uri_str_to_bytes(mono_uri)
    uri = uri_str_to_bytes(img_uri)
    with open(filename, "wb") as fh:
        fh.write(base64.decodebytes(uri))

    #data = tc.image_analysis.load_images(filename, with_path=True)
    test = tc.SFrame({'image': [tc.Image(filename)]})

    model = tc.load_model('{}/models/{}.model'.format(sys.path[0], uuid))
    # 3. Generate prediction
    #predictions = model.predict(dataset=data)
    test['predictions'] = model.predict(test, output_type="probability_vector")

    os.remove(filename)

    return {
        "statusCode": 200,
        "body": {
            "authentic": float(test['predictions'][0][0]),
            "forge": float(test['predictions'][0][1])
        }
    }
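A hedged sketch of calling this handler; uri_str_to_bytes is not shown above and is assumed to strip any data-URI prefix and return the base64 payload as bytes, and 'some-uuid' must match a model previously saved under models/:

import base64

with open('signature.png', 'rb') as f:
    img_uri = base64.b64encode(f.read()).decode('ascii')

response = verify('some-uuid', img_uri)   # placeholder uuid
print(response['body'])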
Example #4
File: testScript.py  Project: kawer/BeerML
def drop_alpha(image):
    return tc.Image(_image_data=image.pixel_data[..., :3].tobytes(),
                    _width=image.width,
                    _height=image.height,
                    _channels=3,
                    _format_enum=2,
                    _image_data_size=image.width * image.height * 3)
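Usage sketch, assuming a 4-channel (RGBA) input such as a PNG with transparency:

rgba = tc.Image('logo_with_alpha.png')   # hypothetical RGBA image
rgb = drop_alpha(rgba)
print(rgb.channels)                      # 3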
Example #5
 def draw_single_image(row):
     image = row['image']
     anns = row['annotations']
     row_number = row['id']
     if anns is None:
         anns = []
     elif isinstance(anns, dict):
         anns = [anns]
     try:
         pil_img = Image.fromarray(image.pixel_data)
         _annotate_image(pil_img,
                         anns,
                         confidence_threshold=confidence_threshold)
         image = _np.array(pil_img)
         if len(image.shape) == 2:
             # Grayscale image, reshape image shape
             image = image.reshape(image.shape[0], image.shape[1], 1)
         FORMAT_RAW = 2
         annotated_image = _tc.Image(_image_data=image.tobytes(),
                                     _width=image.shape[1],
                                     _height=image.shape[0],
                                     _channels=image.shape[2],
                                     _format_enum=FORMAT_RAW,
                                     _image_data_size=image.size)
     except Exception as e:
         if row_number == -1:
             # indication that it was a single image and not an SFrame
             raise _ToolkitError(e)
         raise _ToolkitError("Received exception at row " +
                             str(row_number) + ": " + str(e))
     return annotated_image
Example #6
def createImage(numpy_image, width, height):
    return tc.Image(_image_data=numpy_image.tobytes(),
                    _width=width,
                    _height=height,
                    _channels=3,
                    _format_enum=2,
                    _image_data_size=width * height * 3)
def main():

    args = doParsing()
    print(args)

    sframeDict = {"features": [], "targets": []}

    classesSubDirs = next(os.walk(args.imagesDir))[1]

    for classSubDir in tqdm(classesSubDirs):

        for imageFile in tqdm(
                glob.glob(
                    os.path.join(args.imagesDir, classSubDir) + "/*.jpg")):

            # We can directly save the image without resizing
            # (performed automatically to model input size during training)
            image = tc.Image(imageFile)
            sframeDict["features"].append(image)
            sframeDict["targets"].append(classSubDir)

    print("Saving SFrame...")

    datasetSFrame = tc.SFrame(data=sframeDict)
    datasetSFrame.save(filename=args.outputDatasetFile)

    print("SFrame saved in " + args.outputDatasetFile)
Example #8
 def __init__(self):
     self.imgframe = tc.load_sframe('model/final/final.sframe')
     self.model = tc.load_model('model/final/final_model')
     self.sample = tc.Image()
     self.results = SFrame()
     self.rows = SArray()
     self.pathlist = []
     self.distance_list = []
Example #9
 def from_pil_image(pil_img, image_format="png"):
     # The above didn't work, so as a temporary fix write to temp files
     if image_format == "raw":
         image = np.array(pil_img)
         FORMAT_RAW = 2
         return tc.Image(
             _image_data=image.tobytes(),
             _width=image.shape[1],
             _height=image.shape[0],
             _channels=image.shape[2],
             _format_enum=FORMAT_RAW,
             _image_data_size=image.size,
         )
     else:
         with tempfile.NamedTemporaryFile(mode="w+b",
                                          suffix="." + image_format) as f:
             pil_img.save(f, format=image_format)
             return tc.Image(f.name)
Example #10
def get_tc_img(img):
    assert (isinstance(img, np.ndarray)), 'Image is not of type numpy.ndarray.'

    RAW_FORMAT = 2
    return tc.Image(_image_data=img.tobytes(),
                    _width=img.shape[1],
                    _height=img.shape[0],
                    _channels=img.shape[2],
                    _format_enum=RAW_FORMAT,
                    _image_data_size=img.size)
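Usage sketch with a synthetic array (numpy is assumed to be imported as np):

arr = np.zeros((64, 64, 3), dtype=np.uint8)   # height x width x channels
img = get_tc_img(arr)
print(img.width, img.height, img.channels)    # 64 64 3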
Example #11
def model_predict(model, image_path):
    image = tc.Image(image_path)
    image_sframe = tc.SFrame({
        'path': [image_path],
        'image': [image],
        'type': ['']
    })
    print("Image SFrame : \n", image_sframe)

    prediction = model.predict(image_sframe)
    print("Prediction : \n", prediction)
Example #12
def _convert_image_to_raw(image):
    FORMAT_RAW = 2
    if image._format_enum == FORMAT_RAW:
        return image
    else:
        return _tc.Image(_image_data=image.pixel_data.tobytes(),
                         _width=image.width,
                         _height=image.height,
                         _channels=image.channels,
                         _format_enum=FORMAT_RAW,
                         _image_data_size=image.width * image.height *
                         image.channels)
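Usage sketch: force a decoded (raw) copy of a compressed image; _tc is this snippet's alias for turicreate, and the file name is a placeholder:

jpeg_img = _tc.Image('photo.jpg')          # stored in its compressed format
raw_img = _convert_image_to_raw(jpeg_img)  # decoded copy, format enum 2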
Example #13
def get_test_data():
    """
    Create 5 all white images and 5 all black images. Then add some noise to
    each image.
    """
    from PIL import Image

    DIM = 224

    # Five all white images
    data = []
    for _ in range(5):
        data.append(np.full((DIM, DIM, 3), 255, dtype=np.uint8))

    # Five all black images
    for _ in range(5):
        data.append(np.full((DIM, DIM, 3), 0, dtype=np.uint8))

    # Add some random noise to each images
    random = np.random.RandomState(100)
    for cur_image in data:
        for _ in range(1000):
            x, y = random.randint(DIM), random.randint(DIM)
            rand_pixel_value = (
                random.randint(255),
                random.randint(255),
                random.randint(255),
            )
            cur_image[x][y] = rand_pixel_value

    # Convert to an array of tc.Images
    images = []
    for cur_data in data:
        pil_image = Image.fromarray(cur_data)
        image_data = bytearray([z for l in pil_image.getdata() for z in l])
        image_data_size = len(image_data)
        tc_image = tc.Image(
            _image_data=image_data,
            _width=DIM,
            _height=DIM,
            _channels=3,
            _format_enum=2,
            _image_data_size=image_data_size,
        )
        images.append(tc_image)

    labels = ["white"] * 5 + ["black"] * 5
    data_dict = {"awesome_image": images, "awesome_label": labels}
    data = tc.SFrame(data_dict)

    return data
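The returned SFrame can train a small classifier directly; a sketch using the column names defined above:

data = get_test_data()
model = tc.image_classifier.create(data,
                                   target='awesome_label',
                                   feature='awesome_image')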
Example #14
def load():
    """ Loads the exported data into an SFrame """
    annotations = turicreate.SFrame(data="%s/annotations.csv" % (EXPORT_PATH))

    # Load our images
    images = list()

    for path in annotations["path"]:
        image = turicreate.Image("%s/%s" % (EXPORT_PATH, path))
        images.append(image)

    data = annotations
    data["image"] = images

    return data
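A hedged sketch of consuming the loaded SFrame, assuming the 'annotations' column in annotations.csv is already in the bounding-box dictionary format the object detector expects:

data = load()
model = turicreate.object_detector.create(data,
                                          feature='image',
                                          annotations='annotations')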
Example #15
 def draw_single_image(row):
     image = row['image']
     anns = row['annotations']
     pil_img = Image.fromarray(image.pixel_data)
     _annotate_image(pil_img,
                     anns,
                     confidence_threshold=confidence_threshold)
     image = _np.array(pil_img)
     FORMAT_RAW = 2
     annotated_image = _tc.Image(_image_data=image.tobytes(),
                                 _width=image.shape[1],
                                 _height=image.shape[0],
                                 _channels=image.shape[2],
                                 _format_enum=FORMAT_RAW,
                                 _image_data_size=image.size)
     return annotated_image
Example #16
 def resize_turicreate_image(image, output_shape):
     image *= 255.0
     image = image.astype("uint8")
     FORMAT_RAW = 2
     tc_image = tc.Image(
         _image_data=image.tobytes(),
         _width=image.shape[1],
         _height=image.shape[0],
         _channels=image.shape[2],
         _format_enum=FORMAT_RAW,
         _image_data_size=image.size,
     )
     tc_image = tc.image_analysis.resize(
         tc_image, output_shape[1], output_shape[0], resample="bilinear"
     )
     image = tc_image.pixel_data
     image = image.astype(np.float32)
     image /= 255.0
     return image
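Usage sketch; the input is assumed to be a float image in [0, 1] with shape (height, width, channels):

img = np.random.rand(480, 640, 3).astype(np.float32)
resized = resize_turicreate_image(img, output_shape=(224, 224))
print(resized.shape)   # (224, 224, 3), float32 in [0, 1]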
Example #17
 def from_pil_image(pil_img):
     height = pil_img.size[1]
     width = pil_img.size[0]
     if pil_img.mode == 'L':
         image_data = bytearray([z for z in pil_img.getdata()])
         channels = 1
     elif pil_img.mode == 'RGB':
         image_data = bytearray([z for l in pil_img.getdata() for z in l ])
         channels = 3
     else:
         image_data = bytearray([z for l in pil_img.getdata() for z in l])
         channels = 4
     format_enum = _format['RAW']
     image_data_size = len(image_data)
     img = tc.Image(_image_data=image_data,
             _width=width, _height=height,
             _channels=channels,
             _format_enum=format_enum,
             _image_data_size=image_data_size)
     return img
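Usage sketch (the file name is a placeholder; _format is assumed to be turicreate's internal format map, with _format['RAW'] == 2 matching the constant hard-coded in the other examples):

from PIL import Image

pil_img = Image.open('photo.jpg').convert('RGB')
img = from_pil_image(pil_img)
print(img.width, img.height, img.channels)   # channels == 3 for RGB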
Example #18
def main():
    """
    Script to export results for Kaggle, Images are read one by one
    """
    args = doParsing()
    print(args)

    # Load model
    model = tc.load_model(args.modelPath)

    print("Loaded model from " + args.modelPath)

    # Dogs and cats test dataset has 12500 samples

    results = []

    # One by one image prediction
    for imageFile in sorted(glob.glob(args.datasetTestDir + "/*.jpg")):

        # Load image as tc Image, no explicit resize to model input size
        image = tc.Image(imageFile)

        # Single image SFrame compatible with model utility functions
        sframe = tc.SFrame(data={"features": [image]})

        # Get and print TOP1
        probabilities = model.predict(sframe, output_type="probability_vector")
        print(
            os.path.basename(imageFile) + " -> " +
            model.classes[int(np.argmax(probabilities.to_numpy()[0]))])

        # Save the dog probability, keyed by file name without extension
        basename = os.path.basename(imageFile)
        results.append((basename[:basename.rfind('.')],
                        probabilities[0][model.classes.index("dog")]))

    print("Test finished")

    if args.kaggleExportFile is not None:
        exportResults(results, args.kaggleExportFile)
Example #19
def row_from_annotated_image(annotated_image):

    # create Image object
    img_filename = img_filename_from_annotated_image(annotated_image)

    img = tc.Image(images_dir_path + img_filename)
    image_size = ImageSize(width=img.width, height=img.height)

    if annotated_image["Label"] == "Skip":
        return (img, None)

    labelbox_annotations = annotated_image["Label"].iteritems()

    # parse and process annotations info
    def sframe_annotations_from_labelbox_annotations(class_annotations):
        class_name = class_annotations[0]
        bounding_boxes = class_annotations[1]
        sframe_entry = []
        for bounding_box in bounding_boxes:
            coordinate = {
                'coordinates':
                coordinates_from_bounding_box(bounding_box, image_size),
                'label':
                class_name,
                'type':
                'rectangle'
            }
            sframe_entry.append(coordinate)
        return sframe_entry

    sframe_annotations = map(
        lambda kv: sframe_annotations_from_labelbox_annotations(kv),
        labelbox_annotations)

    # flatten to the single list of annotation dictionaries that turicreate expects
    flat_sframe_annotations = [
        item for sublist in sframe_annotations for item in sublist
    ]

    return (img, flat_sframe_annotations)
Example #20
def get_bitmap_sframe():
    labels, drawings = [], []
    for category in categories:
        data = np.load(
            bitmap_directory + '/' + category + '.npy', 
            allow_pickle=True
        )
        random_state.shuffle(data)
        sampled_data = data[:training_samples]
        transformed_data = sampled_data.reshape(
            sampled_data.shape[0], 28, 28, 1)

        for pixel_data in transformed_data:
            image = tc.Image(_image_data=np.invert(pixel_data).tobytes(),
                             _width=pixel_data.shape[1],
                             _height=pixel_data.shape[0],
                             _channels=pixel_data.shape[2],
                             _format_enum=2,
                             _image_data_size=pixel_data.size)
            drawings.append(image)
            labels.append(category)
        print('...%s bitmaps complete' % category)
    print('%d bitmaps with %d labels' % (len(drawings), len(labels)))
    return tc.SFrame({'drawing': drawings, 'label': labels})
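The resulting SFrame plugs straight into the drawing classifier; a sketch, assuming categories, bitmap_directory, training_samples, and random_state are defined as module globals:

drawings = get_bitmap_sframe()
model = tc.drawing_classifier.create(drawings, 'label', feature='drawing')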
Example #21
    def stylize(self, images, style=None, verbose=True, max_size=800, batch_size=4):
        """
        Stylize an SFrame of Images given a style index or a list of
        styles.

        Parameters
        ----------
        images : SFrame | Image
            A dataset that has the same content image column that was used
            during training.

        style : int or list, optional
            The selected style or list of styles to use on the ``images``. If
            `None`, all styles will be applied to each image in ``images``.

        verbose : bool, optional
            If True, print progress updates.

        max_size : int or tuple
            Max input image size that will not get resized during stylization.

            Images with a side larger than this value will be scaled down due
            to time and memory constraints. If a tuple, it is interpreted as
            (max width, max height). Without resizing, larger input images
            take more time to stylize. Resizing can affect the quality of the
            final stylized image.

        batch_size : int, optional
            If you are getting memory errors, try decreasing this value. If you
            have a powerful computer, increasing this value may improve
            performance.

        Returns
        -------
        out : SFrame or SArray or turicreate.Image
            If ``style`` is a list, an SFrame is always returned. If ``style``
            is a single integer, the output type will match the input type
            (Image, SArray, or SFrame).

        See Also
        --------
        create

        Examples
        --------
        >>> image = tc.Image("/path/to/image.jpg")
        >>> stylized_images = model.stylize(image, style=[0, 1])
        Data:
        +--------+-------+------------------------+
        | row_id | style |     stylized_image     |
        +--------+-------+------------------------+
        |   0    |   0   | Height: 256 Width: 256 |
        |   0    |   1   | Height: 256 Width: 256 |
        +--------+-------+------------------------+
        [2 rows x 3 columns]

        >>> images = tc.image_analysis.load_images('/path/to/images')
        >>> stylized_images = model.stylize(images)
        Data:
        +--------+-------+------------------------+
        | row_id | style |     stylized_image     |
        +--------+-------+------------------------+
        |   0    |   0   | Height: 256 Width: 256 |
        |   0    |   1   | Height: 256 Width: 256 |
        |   0    |   2   | Height: 256 Width: 256 |
        |   0    |   3   | Height: 256 Width: 256 |
        |   1    |   0   | Height: 640 Width: 648 |
        |   1    |   1   | Height: 640 Width: 648 |
        |   1    |   2   | Height: 640 Width: 648 |
        |   1    |   3   | Height: 640 Width: 648 |
        +--------+-------+------------------------+
        [8 rows x 3 columns]
        """
        if batch_size < 1:
            raise _ToolkitError("'batch_size' must be greater than or equal to 1")

        from ._sframe_loader import SFrameSTIter as _SFrameSTIter
        import mxnet as _mx
        from mxnet import gluon as _gluon
        set_of_all_idx = self._style_indices()
        style, single_style = self._style_input_check(style)

        if isinstance(max_size, _six.integer_types):
            input_shape = (max_size, max_size)
        else:
            # Outward-facing, we use (width, height), but internally we use
            # (height, width)
            input_shape = max_size[::-1]

        images, unpack = self._canonize_content_input(images, single_style=single_style)

        dataset_size = len(images)
        output_size = dataset_size * len(style)
        batch_size_each = min(batch_size, output_size)
        num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=batch_size_each)

        if num_mxnet_gpus == 0:
            # CPU processing prefers native size to prevent stylizing
            # unnecessary regions
            batch_size_each = 1
            loader_type = 'favor-native-size'
        else:
            # GPU processing prefers batches of same size, using padding
            # for smaller images
            loader_type = 'pad'

        self._model.batch_size = batch_size_each
        self._model.hybridize()

        ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size_each)
        batch_size = max(num_mxnet_gpus, 1) * batch_size_each
        last_time = 0
        if dataset_size == 0:
            raise _ToolkitError("SFrame cannot be empty")
        content_feature = _tkutl._find_only_image_column(images)
        _raise_error_if_not_training_sframe(images, content_feature)

        max_h = 0
        max_w = 0
        oversized_count = 0
        for img in images[content_feature]:
            if img.height > input_shape[0] or img.width > input_shape[1]:
                oversized_count += 1
            max_h = max(img.height, max_h)
            max_w = max(img.width, max_w)

        if input_shape[0] > max_h:
            input_shape = (max_h, input_shape[1])
        if input_shape[1] > max_w:
            input_shape = (input_shape[0], max_w)

        # If we find large images, let's switch to sequential iterator
        # pre-processing, to prevent memory issues.
        sequential = max(max_h, max_w) > 2000

        if verbose and output_size != 1:
            print('Stylizing {} image(s) using {} style(s)'.format(dataset_size, len(style)))
            if oversized_count > 0:
                print('Scaling down {} image(s) exceeding {}x{}'.format(oversized_count, input_shape[1], input_shape[0]))

        content_images_loader = _SFrameSTIter(images, batch_size,
                                              shuffle=False,
                                              feature_column=content_feature,
                                              input_shape=input_shape,
                                              num_epochs=1,
                                              loader_type=loader_type,
                                              repeat_each_image=len(style),
                                              sequential=sequential)

        sb = _tc.SFrameBuilder([int, int, _tc.Image],
                               column_names=['row_id', 'style', 'stylized_{}'.format(self.content_feature)])

        count = 0
        for i, batch in enumerate(content_images_loader):
            if loader_type == 'favor-native-size':
                c_data = [batch.data[0][0].expand_dims(0)]
            else:
                c_data = _gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            indices_data = _gluon.utils.split_and_load(_mx.nd.array(batch.repeat_indices, dtype=_np.int64),
                                                       ctx_list=ctx, batch_axis=0)
            outputs = []
            for b_img, b_indices in zip(c_data, indices_data):
                mx_style = _mx.nd.array(style, dtype=_np.int64, ctx=b_indices.context)
                b_batch_styles = mx_style[b_indices]
                output = self._model(b_img, b_batch_styles)
                outputs.append(output)

            image_data = _np.concatenate([
                (output.asnumpy().transpose(0, 2, 3, 1) * 255).astype(_np.uint8)
                for output in outputs], axis=0)

            batch_styles = [style[idx] for idx in batch.repeat_indices]

            for b in range(batch_size - (batch.pad or 0)):
                image = image_data[b]
                # Crop to remove added padding
                crop = batch.crop[b]
                cropped_image = image[crop[0]:crop[1], crop[2]:crop[3]]
                tc_img = _tc.Image(_image_data=cropped_image.tobytes(),
                                   _width=cropped_image.shape[1],
                                   _height=cropped_image.shape[0],
                                   _channels=cropped_image.shape[2],
                                   _format_enum=2,
                                   _image_data_size=cropped_image.size)
                sb.append([batch.indices[b], batch_styles[b], tc_img])
                count += 1

            cur_time = _time.time()
            if verbose and output_size != 1 and (cur_time > last_time + 10 or count == output_size):
                print('Stylizing {curr_image:{width}d}/{max_n:{width}d}'.
                      format(curr_image=count, max_n=output_size, width=len(str(output_size))))
                last_time = cur_time

        return unpack(sb.close())
Example #22
import turicreate as tc

# Load the model
model_file = './probe_id_model_dir/probe_id.model'
model = tc.load_model(model_file)

# Load the test images
path_prefix = '../medical_data/mp4_probe_overlayed/extracted_frames/vid1/'
filename = 'img0002.jpg'
file = path_prefix + filename

test_image = tc.SFrame({'image': [tc.Image(file)]})

test_image['predictions'] = model.predict(test_image, confidence_threshold=0)
print(test_image['predictions'])
'''
test_image['annotated_predictions'] = \
    tc.one_shot_object_detector.util.draw_bounding_boxes(
        test_image['image'],
        test_image['predictions']
    )

print(test_image['annotated_predictions'])

test_image.explore()
'''
Example #23
import turicreate as tc

# Load the starter images
starter_images = tc.SFrame({
    'image': [tc.Image('stop_sign_starter.png')],
    'label': ['stop_sign']
})

# Load test images
test_images = tc.SFrame({
    'image':
    [tc.Image('stop_sign_test1.jpg'),
     tc.Image('stop_sign_test2.jpg')]
})

# Create a model. This step will take a few hours on CPU and about an hour on GPU
model = tc.one_shot_object_detector.create(starter_images, 'label')

# Save predictions on the test set
test_images['predictions'] = model.predict(test_images)

# Draw prediction bounding boxes on the test images
test_images['annotated_predictions'] = \
    tc.one_shot_object_detector.util.draw_bounding_boxes(test_images['image'],
        test_images['predictions'])

# To visualize the predictions made on the test set
test_images.explore()
test_images.save('test_data.csv', format='csv')

# Save the model for later use in TuriCreate
Example #24
import turicreate as tc
import sys

if len(sys.argv) != 2:
    print(
        'Incorrect number of arguments. Please call with exactly one argument: '
        'the .csv file specifying the image annotations.'
    )
    sys.exit()
file_name = sys.argv[1]
sFrame = tc.SFrame.read_csv(file_name)
sFrame['image'] = sFrame['image_path'].apply(lambda path: tc.Image(path))
train_data, test_data = sFrame.random_split(0.8)
model = tc.object_detector.create(train_data,
                                  feature='image',
                                  annotations='annotations')
output_model_name = file_name + '.mlmodel'
model.export_coreml(output_model_name)
# Evaluate on the held-out split; the default metric reports mean average precision.
metrics = model.evaluate(test_data)
print(metrics)
Example #25
    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

    image1 = cv2.resize(image1, image_size)
    image2 = cv2.resize(image2, image_size)

    concated_image = np.concatenate((image1, image2), axis=0)

    cv2.imwrite("temp.png", concated_image)

    try:
        model = tc.load_model("angleRoationPredict.model")
        tc_image = tc.Image("temp.png")
        result = model.predict(tc_image)

        ref = 1
        if result == "0":
            ref = 0
        elif result == "1":
            ref = 90
        elif result == "2":
Example #26
File: testScript.py  Project: kawer/BeerML
		'miller': "Miller",
		'tecateC': "Tecate",
		'tecate': "Tecate"
	}
	return switcher.get(label_original, 'ERROR')

def drop_alpha(image):
    return tc.Image(_image_data=image.pixel_data[..., :3].tobytes(),
                    _width=image.width,
                    _height=image.height,
                    _channels=3,
                    _format_enum=2,
                    _image_data_size=image.width * image.height * 3)

imagesList = []
pic = tc.Image('imagesOld/train/IMG_1206.JPG')
pic = drop_alpha(pic)
imagesList.append(pic)

annotationList = []
tree = ET.parse('imagesOld/train/IMG_1206.xml')
root = tree.getroot()
anotation = []
for obj in root.iter('object'):
	label = label_maker(obj[0].text)
	if label == 'ERROR':
		continue
	height = int(obj[4][2].text) - int(obj[4][0].text)
	width = int(obj[4][3].text) - int(obj[4][1].text)
Example #27
                             }
                         }],
                         [{
                             'label': 'painting111',
                             'type': 'rectangle',
                             'coordinates': {
                                 'height': 394,
                                 'width': 391,
                                 'x': 517,
                                 'y': 572
                             }
                         }]])

#load images
images = tc.SArray([
    tc.Image('images/IMG_0082.jpeg'),
    tc.Image('images/IMG_0083.jpeg'),
    tc.Image('images/IMG_0084.jpeg'),
    tc.Image('images/IMG_0085.jpeg'),
    tc.Image('images/IMG_0086.jpeg'),
    tc.Image('images/IMG_0087.jpeg'),
    tc.Image('images/IMG_0088.jpeg'),
    tc.Image('images/IMG_0089.jpeg'),
    tc.Image('images/IMG_0090.jpeg'),
    tc.Image('images/IMG_0091.jpeg'),
    tc.Image('images/IMG_0092.jpeg'),
    tc.Image('images/IMG_0093.jpeg'),
    tc.Image('images/IMG_0094.jpeg'),
    tc.Image('images/IMG_0095.jpeg'),
    tc.Image('images/IMG_0096.jpeg'),
    tc.Image('images/IMG_0097.jpeg'),
Example #28
 def create_sample(self, imgUrl):
     self.sample = tc.Image(imgUrl, format='auto')
Example #29
    }],
    [{
        'label': 'PuzzleBox',
        'type': 'rectangle',
        'coordinates': {
            'x': 175,
            'y': 100,
            'width': 173,
            'height': 189
        }
    }],
])

#load images by providing their relative path to the folder
images = tc.SArray([
    tc.Image('images/Object0.png'),
    tc.Image('images/Object1.png'),
    tc.Image('images/Object3.png'),
    tc.Image('images/Object4.png'),
    tc.Image('images/Object5.png'),
    tc.Image('images/Object6.png'),
    tc.Image('images/Object7.png'),
])

# Merge images and annotations
data = tc.SFrame({'image': images, 'annotations': annotations})

# Make a train-test split
train_data, test_data = data.random_split(0.8)

# Create a model using Turi Create’s object detector API
Example #30
import turicreate as tc

starter_img_path = './images/medical_images/probe_images/probe_image.png'
starter_images = tc.SFrame({'image': [tc.Image(starter_img_path)],
                            'label': ['probe']})

model = tc.one_shot_object_detector.create(starter_images, 'label', max_iterations=1000)
model.save('probe_id2.model')