# Example script: attach a 'mask_annotation_score' imageset node to a
# collection's root imageset.
# NOTE(review): WORKSPACE_ID is referenced below but not defined in this
# fragment — presumably assigned just above in the original example.
COLLECTION_ID = ''

# Authenticate against the Zegami API (username/password left blank here).
zc = ZegamiClient("", "")

workspace = zc.get_workspace_by_id(WORKSPACE_ID)
collection = workspace.get_collection_by_id(COLLECTION_ID)
print(collection)

# Root imageset of the collection — used as the parent of the new node.
imageset_id = collection._data.get('imageset_id')

resp = nodes.add_node(
    zc,
    workspace,
    'mask_annotation_score',
    # truth authors and evaluated authors should either
    # be not specified or a list of strings
    {
        # "evaluated_authors": ["authors name"],
    },
    'imageset',
    imageset_parents=[imageset_id],
    name="annotation score ims")
# The API response carries the created node under the 'imageset' key.
annotation_score_node = resp.get('imageset')

# create mapping dataset to map
join_dataset_id = collection._data.get('imageset_dataset_join_id')
# NOTE(review): this add_node call is truncated — its trailing arguments and
# closing parenthesis are missing, and the source jumps straight into the
# body of an unrelated if/else fragment below. Broken as-is; restore the
# full call from the original example before running.
resp = nodes.add_node(zc,
                      workspace,
                      'mapping', {},
                      'dataset',
                      dataset_parents=[join_dataset_id],
                      imageset_parents=[annotation_score_node.get('id')],
    scaled_imageset_id = collection._data.get('scaled_imageset_id')
    join_dataset_id = collection._data.get('imageset_dataset_join_id')
# NOTE(review): orphaned else-branch — its matching 'if' (presumably a
# collection version check, given the '# v2' marker) is missing here.
else:
    # v2
    # v2 (multi-source) collections keep imageset ids on the source object.
    source = collection.sources[0]
    dataset_id = collection._data.get('dataset_id')
    scaled_imageset_id = source._data.get('scaled_imageset_id')
    join_dataset_id = source._data.get('imageset_dataset_join_id')

# Create a 'custom_feature_extraction' imageset node that applies a stored
# model file ("feature_extractor.h5") to the scaled imageset; width/height/
# greyscale presumably control image pre-processing before inference.
resp = nodes.add_node(zc,
                      workspace,
                      'custom_feature_extraction', {
                          "model_name": "feature_extractor.h5",
                          "greyscale": True,
                          "width": 164,
                          "height": 164,
                      },
                      'imageset',
                      # NOTE(review): other examples in this file wrap the
                      # parent id in a list ([scaled_imageset_id]) — confirm
                      # the SDK accepts a bare id here.
                      imageset_parents=scaled_imageset_id,
                      name="custom feature extraction node",
                      node_group=collection_group,
                      processing_category='image_clustering')
custom_feature_extraction_node = resp.get('imageset')

# Add new clustering node
# NOTE(review): fragment is truncated — the 'exclude' list, the remaining
# options, and the closing arguments of this add_node call are cut off;
# the next statement begins mid-expression. Broken as-is.
resp = nodes.add_node(
    zc,
    workspace,
    'cluster',
    {
        'exclude': [''
# create explainability map node
# Produces per-image activation heat-maps blended over the source images
# (see the 'alpha' blend weight below). MODEL_NAME, augment_imageset_id and
# collection_group are defined elsewhere in the original example.
resp = nodes.add_node(
    zc,
    workspace,
    'explainability_map',
    {
        "file_name":
        MODEL_NAME,  # Optional: Name of the model in workspace storage.
        # This will overwrite the tensorflow model param.
        "model_name":
        'Xception',  # Optional: Name of the tensorflow model. Default is ResNet50.
        "width":
        224,  # Optional: Width value to pre process the image before inputting into the model.
        "height":
        224,  # Optional: Height value to pre process the image before inputting into the model.
        "last_conv_layer_name":
        '',  # Optional: Name of the last convolutional layer.
        "class_index":
        '',  # Optional: Index of class to evaluate activation maps with respect to.
        # If left blank, the top predicted class is used for each image.
        "alpha":
        0.5,  # Optional: Blend weight for combining heatmap to image. Default is 0.4.
    },
    'imageset',
    imageset_parents=augment_imageset_id,
    name="explainability map node",
    node_group=collection_group,
    processing_category='upload')
explainability_map_node = resp.get('imageset')
# NOTE(review): a new example script begins here (the scraped source runs
# the previous fragment's output assignment straight into it).
zc = ZegamiClient()

workspace = zc.get_workspace_by_id(WORKSPACE_ID)
collection = workspace.get_collection_by_id(COLLECTION_ID)
print(collection)

# Add new clustering node
# Ids pulled straight off the collection record (v1-style layout).
dataset_id = collection._data.get('dataset_id')
scaled_imageset_id = collection._data.get('scaled_imageset_id')
join_dataset_id = collection._data.get('imageset_dataset_join_id')

print('adding feature extraction node')
# create feature extraction node
# Extracts image features from the scaled imageset; the resulting node
# feeds the clustering step below.
resp = nodes.add_node(zc,
                      workspace,
                      'image_feature_extraction', {},
                      imageset_parents=[scaled_imageset_id],
                      type='imageset',
                      name="Feature extraction imageset")
features_node = resp.get('imageset')
print('\nadded feature extraction node', features_node)

# create clustering node
# NOTE(review): truncated — the remaining arguments and closing parenthesis
# of this add_node call are missing; the next example starts on the
# following line. Broken as-is.
resp = nodes.add_node(
    zc,
    workspace,
    'cluster', {
        "columns_order": [1002, 1003],
        "out_column_titles": ["Image Similarity x", "Image Similarity y"],
        "out_columns": ["image_similarity_x", "image_similarity_y"]
    },
    dataset_parents=features_node.get('id'),
# Example script: run image classification over a collection's augmented
# imageset and render the results as a dataset.
WORKSPACE_ID = ''
COLLECTION_ID = ''

zc = ZegamiClient()

workspace = zc.get_workspace_by_id(WORKSPACE_ID)
# import pdb; pdb.set_trace()
collection = workspace.get_collection_by_id(COLLECTION_ID)
print(collection)

# Parent imageset for the classification node: the augmented imageset.
augment_imageset_id = collection._data.get('augment_imageset_id')

# create classification imagesets to generate classification data
resp = nodes.add_node(zc,
                      workspace,
                      'image_classification', {},
                      'imageset',
                      imageset_parents=[augment_imageset_id],
                      name="classification ims")
classification_node = resp.get('imageset')

# create image_info dataset to render classifications as a tsv file
resp = nodes.add_node(zc,
                      workspace,
                      'image_info', {},
                      'dataset',
                      imageset_parents=classification_node.get('id'),
                      name="classification info ds")
info_node = resp.get('dataset')

# create mapping dataset to map
# NOTE(review): the example is cut off here — the mapping add_node call
# that would consume join_dataset_id is missing from this fragment.
join_dataset_id = collection._data.get('imageset_dataset_join_id')
# ---- Exemple #6 (score: 0) — example-separator artifact from the scraped
# source; not executable code ----
    # NOTE(review): orphaned if-branch — the 'if' header (presumably a
    # collection version check, given the '# v2' marker below) is missing
    # from this fragment. Broken as-is.
    dataset_id = collection._data.get('dataset_id')
    scaled_imageset_id = collection._data.get('scaled_imageset_id')
    join_dataset_id = collection._data.get('imageset_dataset_join_id')
else:
    # v2
    # v2 (multi-source) collections keep imageset ids on the source object.
    source = collection.sources[0]
    dataset_id = collection._data.get('dataset_id')
    scaled_imageset_id = source._data.get('scaled_imageset_id')
    join_dataset_id = source._data.get('imageset_dataset_join_id')

print('adding feature extraction node')
# create feature extraction node
# Builds an 'image_feature_extraction' imageset node over the scaled
# imageset. NAME and collection_group are defined elsewhere in the
# original example.
resp = nodes.add_node(zc,
                      workspace,
                      'image_feature_extraction', {},
                      imageset_parents=[scaled_imageset_id],
                      type='imageset',
                      name="Feature extraction " + NAME,
                      node_group=collection_group,
                      processing_category='image_clustering')
features_node = resp.get('imageset')
print('\nadded feature extraction node', features_node)

# create clustering node
# NOTE(review): truncated — the trailing arguments and closing parenthesis
# of this add_node call are missing (the scrape cuts to the next example).
# col_name is defined elsewhere in the original example. Broken as-is.
resp = nodes.add_node(zc,
                      workspace,
                      'cluster', {
                          "columns_order": [1002, 1003],
                          "out_column_titles": [NAME + " x", NAME + " y"],
                          "out_columns": [col_name + "x", col_name + "y"]
                      },
                      dataset_parents=features_node.get('id'),
# ---- Exemple #7 (score: 0) — example-separator artifact from the scraped
# source; not executable code ----
# Example script: attach a mean-data-similarity clustering node to a
# collection's dataset and wire its output into the collection output.
# NOTE: WORKSPACE_ID is expected to be defined above this fragment.
COLLECTION_ID = ''

zc = ZegamiClient()

workspace = zc.get_workspace_by_id(WORKSPACE_ID)
collection = workspace.get_collection_by_id(COLLECTION_ID)
print(collection)

# Add new clustering node
# The collection's main dataset is the parent of the new cluster node.
dataset_id = collection._data.get('dataset_id')

# Options for the cluster node, kept in a named dict for readability.
cluster_options = {
    'exclude': [''
                ],  # list of column names as found in tsv files (not Ids)
    'out_columns': ['mean_datasimx', 'mean_datasimy'],
    'out_column_titles': ['Mean_DataSimX', 'Mean_DataSimY'],
    'columns_order': [1100, 1101
                      ]  # important to set this to a unique value
}

node_resp = nodes.add_node(zc,
                           workspace,
                           'cluster',
                           cluster_options,
                           dataset_parents=dataset_id,
                           name="Mean data similarity")
cluster_node = node_resp.get('dataset')

# Include output in collection output
# Make the collection's output dataset depend on the new cluster node so
# its columns appear in the collection's output.
output_dataset_id = collection._data.get('output_dataset_id')
parent_resp = nodes.add_parent(zc, workspace, output_dataset_id,
                               cluster_node.get('id'))
from zegami_sdk import nodes
from zegami_sdk.client import ZegamiClient

# Example script: add a 'mask_annotation' imageset node that applies a
# pre-trained Mask R-CNN weights blob to a collection's root imageset.
WORKSPACE_ID = ''
COLLECTION_ID = ''

zc = ZegamiClient()

workspace = zc.get_workspace_by_id(WORKSPACE_ID)
collection = workspace.get_collection_by_id(COLLECTION_ID)
print(collection)

# Parent of the new node: the collection's root imageset.
source_imageset_id = collection._data.get('imageset_id')

# Model configuration for the mask-annotation node.
mask_node_options = {
    "weights_blob": "mask_rcnn_alloy-scratch-detector_0100.h5",
    "model_author_name": "Alloy Scratch Detector_0100",
    # TODO add param for num classes
}

# create classification imagesets to generate classification data
node_resp = nodes.add_node(zc,
                           workspace,
                           'mask_annotation',
                           mask_node_options,
                           'imageset',
                           imageset_parents=[source_imageset_id],
                           name="masks ims")
classification_node = node_resp.get('imageset')