예제 #1
0
파일: eval.py 프로젝트: brokendata/cnn4brca
def evaluate(image_path, label_path):
	"""Restore the trained network, segment one image and return its IOU.

	Args:
		image_path: path of the input image (read via load_image).
		label_path: path of the ground-truth label image.

	Returns:
		Intersection-over-union between the predicted segmentation and
		the ground-truth label.
	"""
	# Read the input image and its ground-truth mask.
	image = load_image(image_path)
	label = scipy.misc.imread(label_path)

	# Build the inference graph; dropout is disabled for evaluation.
	prediction = model.model(image, drop=False)

	saver = tf.train.Saver()

	with tf.Session() as sess:
		# Restore weights from the most recent checkpoint.
		checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
		saver.restore(sess, checkpoint_path)
		model.log("Variables restored from:", checkpoint_path)

		# Forward pass, post-processing and metric in sequence.
		logits = prediction.eval()
		segmentation = post(logits, label, threshold=-1)
		iou = IOU(segmentation, label)
		print("iou =", iou)

	return iou
예제 #2
0
파일: control.py 프로젝트: labase/activnce
    def get(self, registry_id):
        """Render the paginated news/log listing for a user or community."""
        user = self.get_current_user()
        page = int(self.get_argument("page", "1"))

        # Communities and the owner get the news feed; other visitors get
        # the activity log instead.
        if isACommunity(registry_id) or user == registry_id:
            log_data = model.get_news_list(registry_id, date_time=True)
        else:
            log_data = model.get_log_list(registry_id, date_time=True, news=True)
        log_count = len(log_data)

        # Keep only the slice that belongs to the requested page.
        start = (page - 1) * NUM_MAX_NOVIDADES
        log_data = log_data[start:start + NUM_MAX_NOVIDADES]

        # The owner additionally sees the friends/self navigation tabs.
        tabs = []
        if user == registry_id:
            tabs.append(("O que meus amigos tem feito no " + PLATAFORMA, ""))
            tabs.append(("O que eu tenho feito no " + PLATAFORMA, "/news"))

        model.log(user, u'acessou as novidades de', objeto=registry_id, tipo="news", news=False)
        self.render("modules/log/news-list.html", NOMEPAG="novidades", REGISTRY_ID=registry_id,
                    NEWS=log_data, NEWS_COUNT=log_count,
                    PAGE=page, PAGESIZE=NUM_MAX_NOVIDADES,
                    TABS=tabs,
                    MSG="")
예제 #3
0
def _setUpAPIServer(hostname=None, port=None):
    """Create and configure the global XML-RPC API server (idempotent).

    Args:
        hostname: Interface to bind; defaults to "localhost" when unset.
        port: TCP port to bind; defaults to 9876 when unset.
    """
    global _xmlrpc_api_server
    global api_conn_info
    if _xmlrpc_api_server is None:
        #TODO: some way to get defaults.. from config?
        # str() comparison also catches the literal string "None" coming
        # from a config file, not just the None object.
        if str(hostname) == "None":
            hostname = "localhost"
        if str(port) == "None":
            port = 9876

        if CONF.getApiConInfo() is None:
            CONF.setApiConInfo(hostname, port)
        devlog("starting XMLRPCServer with api_conn_info = %s" % str(CONF.getApiConInfo()))
        try:
            _xmlrpc_api_server = model.common.XMLRPCServer(CONF.getApiConInfo())
            # Registers the XML-RPC introspection functions
            # system.listMethods, system.methodHelp and system.methodSignature.
            _xmlrpc_api_server.register_introspection_functions()

            # register a function to nicely stop server
            _xmlrpc_api_server.register_function(_xmlrpc_api_server.stop_server)

            # register all the api functions to be exposed by the server
            for api_function in (
                    createAndAddHost,
                    createAndAddInterface,
                    createAndAddServiceToApplication,
                    createAndAddServiceToInterface,
                    createAndAddApplication,
                    createAndAddNoteToService,
                    createAndAddNoteToHost,
                    createAndAddNoteToNote,
                    createAndAddVulnWebToService,
                    createAndAddVulnToService,
                    createAndAddVulnToHost,
                    addHost,
                    addInterface,
                    addServiceToApplication,
                    addServiceToInterface,
                    addApplication,
                    newHost,
                    newInterface,
                    newService,
                    newApplication,
                    devlog):
                _xmlrpc_api_server.register_function(api_function)

            #TODO: check if all necessary APIs are registered here!!

            devlog("XMLRPC API server configured...")
        except Exception as e:
            # Fix: `except Exception, e` is Python-2-only syntax and a
            # SyntaxError on Python 3; `as e` works on both.
            msg = "There was an error creating the XMLRPC API Server:\n%s" % str(e)
            log(msg)
            devlog("[ERROR] - %s" % msg)
예제 #4
0
# ## Bounding Boxes
#
# Rather than using bounding box coordinates provided by the source datasets, we compute the bounding boxes from masks instead. This allows us to handle bounding boxes consistently regardless of the source dataset, and it also makes it easier to resize, rotate, or crop images because we simply generate the bounding boxes from the updated masks rather than computing bounding box transformation for each type of image transformation.

# In[5]:

# Load a random image and its instance masks.
image_id = random.choice(dataset.image_ids)
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
# Compute bounding boxes directly from the masks (see the note above).
bbox = utils.extract_bboxes(mask)

# Display image and additional stats
print("image_id ", image_id, dataset.image_reference(image_id))
log("image", image)
log("mask", mask)
log("class_ids", class_ids)
log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)

# ## Resize Images
#
# To support multiple images per batch, images are resized to one size (1024x1024). Aspect ratio is preserved, though. If an image is not square, then zero padding is added at the top/bottom or right/left.

# In[6]:

# Load random image and mask.
# np.random.choice returns an array even for a single draw; take element [0].
image_id = np.random.choice(dataset.image_ids, 1)[0]
image = dataset.load_image(image_id)
예제 #5
0
weights_path = "./mask_rcnn_scrap_0126.h5"

# Load weights (matched by layer name so partial architectures work).
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)

# Pick a random image together with its ground truth for comparison.
image_id = random.choice(dataset.image_ids)
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
info = dataset.image_info[image_id]
print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id,
                                       dataset.image_reference(image_id)))

# Run object detection
results = model.detect([image], verbose=1)

# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image,
                            r['rois'],
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
# Log ground-truth shapes so they can be compared against the predictions.
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
def checkHandyMask():
    """Visually inspect every instance mask in the stage2 dataset.

    Loads each image's binary masks and displays them one at a time with
    matplotlib; blocks on plt.show() so this is for manual eyeballing only.
    """
    dataset = ShapesDataset()
    dataset.load_imgs('input/stage2_new/')
    dataset.prepare()

    # Fix: the original shadowed the builtin `id` and reused loop index `i`
    # for both the outer and inner loops.
    for image_id in dataset.image_ids:
        dataset.load_image(image_id)  # load kept for parity; return value unused
        masks = dataset.load_mask(image_id)[0] > 0
        # One binary mask per instance along the last axis.
        for channel in range(masks.shape[-1]):
            plt.imshow(masks[..., channel])
            plt.show()


"""
image_id = np.random.choice(dataset.image_ids, 1)[0]
image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
    dataset, config, image_id, augment=True, use_mini_mask=True)
log("mask", mask)
display_images([image]+[mask[:,:,i] for i in range(min(mask.shape[-1], 7))])
"""
if __name__ == "__main__":
    # Alternative entry points kept for reference:
    #continueTraining(30)
    #loadModelPredict()

    #     loadModelPredictCoCo_oof_kfold(TRAIN_PATH, False)
    #     loadModelPredictCoCo_kfold(TEST_PATH, False, True)

    # NOTE(review): the argument 100 is presumably an epoch count — confirm
    # against continueTrainingCoCo_kfold's signature.
    continueTrainingCoCo_kfold(100)
#     loadModelPredictCoCo(TEST_PATH, False, 'sub', True)
예제 #7
0
파일: api.py 프로젝트: x0james/faraday
def _setUpAPIServer(hostname=None, port=None):
    """Create the global XML-RPC API server, trying candidate bind addresses.

    Defaults hostname to "127.0.0.1" and port to 9876 when unset.  When
    hostname is "localhost", "127.0.0.1" is also tried as a fallback bind
    address.  Raises RuntimeError when no candidate address can be bound.
    """
    global _xmlrpc_api_server
    global api_conn_info
    if _xmlrpc_api_server is None:
        #TODO: some way to get defaults.. from config?
        # str() comparison also catches the literal string "None" from config.
        if str(hostname) == "None":
            hostname = "127.0.0.1"
        if str(port) == "None":
            port = 9876

        if CONF.getApiConInfo() is None:
            CONF.setApiConInfo(hostname, port)
        devlog("starting XMLRPCServer with api_conn_info = %s" %
               str(CONF.getApiConInfo()))

        # Candidate bind addresses; "localhost" gets a loopback-IP fallback.
        hostnames = [hostname]
        if hostname == "localhost":
            hostnames.append("127.0.0.1")

        listening = False
        for hostname in hostnames:

            try:
                _xmlrpc_api_server = model.common.XMLRPCServer(
                    (hostname, CONF.getApiConInfoPort()))
                # Registers the XML-RPC introspection functions system.listMethods, system.methodHelp and system.methodSignature.
                _xmlrpc_api_server.register_introspection_functions()

                # register a function to nicely stop server
                _xmlrpc_api_server.register_function(
                    _xmlrpc_api_server.stop_server)

                # register all the api functions to be exposed by the server
                _xmlrpc_api_server.register_function(createAndAddHost)
                _xmlrpc_api_server.register_function(createAndAddInterface)
                _xmlrpc_api_server.register_function(
                    createAndAddServiceToInterface)
                _xmlrpc_api_server.register_function(createAndAddServiceToHost)
                _xmlrpc_api_server.register_function(createAndAddNoteToService)
                _xmlrpc_api_server.register_function(createAndAddNoteToHost)
                _xmlrpc_api_server.register_function(createAndAddNoteToNote)
                _xmlrpc_api_server.register_function(
                    createAndAddVulnWebToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToHost)
                _xmlrpc_api_server.register_function(addHost)
                _xmlrpc_api_server.register_function(newHost)
                _xmlrpc_api_server.register_function(newService)
                _xmlrpc_api_server.register_function(devlog)

                #TODO: check if all necessary APIs are registered here!!
                # Successful bind: persist the working hostname/port pair.
                listening = True
                CONF.setApiConInfo(hostname, port)
                CONF.saveConfig()

                getLogger().info("XMLRPC API server configured on %s" %
                                 str(CONF.getApiConInfo()))
                break

            except socket.error as e:
                # Bind failed on this address; log and try the next candidate.
                msg = "There was an error creating the XMLRPC API Server (Host:{}): {}".format(
                    hostname, e)
                log(msg)
                devlog("[WARNING] - %s" % msg)

        if not listening:
            raise RuntimeError("Port already in use")
예제 #8
0
# Display a random sample of proposals.
# Proposals classified as background are dotted, and
# the rest show their class and confidence score.
limit = 200
ixs = np.random.randint(0, proposals.shape[0], limit)
# Background proposals (class id 0) get an empty caption.
captions = ["{} {:.3f}".format(class_names[c], s) if c > 0 else ""
            for c, s in zip(roi_class_ids[ixs], roi_scores[ixs])]
visualize.draw_boxes(image, boxes=proposals[ixs],
                     visibilities=np.where(roi_class_ids[ixs] > 0, 2, 1),
                     captions=captions, title="ROIs Before Refinement",
                     ax=get_ax())


# Class-specific bounding box shifts.
# Pick, for each proposal, the delta row of its predicted class.
roi_bbox_specific = mrcnn["deltas"][0, np.arange(proposals.shape[0]), roi_class_ids]
log("roi_bbox_specific", roi_bbox_specific)

# Apply bounding box transformations
# Shape: [N, (y1, x1, y2, x2)]
refined_proposals = utils.apply_box_deltas(
    proposals, roi_bbox_specific * config.BBOX_STD_DEV).astype(np.int32)
log("refined_proposals", refined_proposals)

# Show positive proposals
# ids = np.arange(roi_boxes.shape[0])  # Display all
limit = 5
ids = np.random.randint(0, len(roi_positive_ixs), limit)  # Display random sample
captions = ["{} {:.3f}".format(class_names[c], s) if c > 0 else ""
            for c, s in zip(roi_class_ids[roi_positive_ixs][ids], roi_scores[roi_positive_ixs][ids])]
# NOTE(review): this call appears truncated in this file — its remaining
# arguments are missing past this point.
visualize.draw_boxes(image, boxes=proposals[roi_positive_ixs][ids],
                     refined_boxes=refined_proposals[roi_positive_ixs][ids],
예제 #9
0
파일: api.py 프로젝트: perplext/faraday
def _setUpAPIServer(hostname=None, port=None):
    """Create the global XML-RPC API server, trying candidate bind addresses.

    Defaults hostname to "127.0.0.1" and port to 9876 when unset.  When
    hostname is "localhost", "127.0.0.1" is also tried as a fallback.
    Raises RuntimeError when no candidate address can be bound.
    """
    global _xmlrpc_api_server
    global api_conn_info
    if _xmlrpc_api_server is None:
        #TODO: some way to get defaults.. from config?
        # str() comparison also catches the literal string "None" from config.
        if str(hostname) == "None":
            hostname = "127.0.0.1"
        if str(port) == "None":
            port = 9876

        if CONF.getApiConInfo() is None:
            CONF.setApiConInfo(hostname, port)
        devlog("starting XMLRPCServer with api_conn_info = %s" % str(CONF.getApiConInfo()))

        # Candidate bind addresses; "localhost" gets a loopback-IP fallback.
        hostnames = [hostname]
        if hostname == "localhost":
            hostnames.append("127.0.0.1")

        listening = False
        for hostname in hostnames:

            try:
                _xmlrpc_api_server = model.common.XMLRPCServer((hostname,CONF.getApiConInfoPort()))
                # Registers the XML-RPC introspection functions system.listMethods, system.methodHelp and system.methodSignature.
                _xmlrpc_api_server.register_introspection_functions()

                # register a function to nicely stop server
                _xmlrpc_api_server.register_function(_xmlrpc_api_server.stop_server)

                # register all the api functions to be exposed by the server
                _xmlrpc_api_server.register_function(createAndAddHost)
                _xmlrpc_api_server.register_function(createAndAddInterface)
                _xmlrpc_api_server.register_function(createAndAddServiceToInterface)
                _xmlrpc_api_server.register_function(createAndAddServiceToHost)
                _xmlrpc_api_server.register_function(createAndAddNoteToService)
                _xmlrpc_api_server.register_function(createAndAddNoteToHost)
                _xmlrpc_api_server.register_function(createAndAddNoteToNote)
                _xmlrpc_api_server.register_function(createAndAddVulnWebToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToHost)
                _xmlrpc_api_server.register_function(addHost)
                _xmlrpc_api_server.register_function(newHost)
                _xmlrpc_api_server.register_function(newService)
                _xmlrpc_api_server.register_function(devlog)

                #TODO: check if all necessary APIs are registered here!!
                # Successful bind: persist the working hostname/port pair.
                listening = True
                CONF.setApiConInfo(hostname, port)
                CONF.saveConfig()

                getLogger().info(
                    "XMLRPC API server configured on %s" % str(
                        CONF.getApiConInfo()))
                break

            except socket.error as e:
                # Bind failed on this address; log and try the next candidate.
                msg = "There was an error creating the XMLRPC API Server (Host:{}): {}".format(hostname,e)
                log(msg)
                devlog("[WARNING] - %s" % msg)

        if not listening:
               raise RuntimeError("Port already in use")
예제 #10
0
# Run object detection
results = model.detect([image], verbose=1)

# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image,
                            r['rois'],
                            r['masks'],
                            r['class_ids'],
                            dataset.class_names,
                            r['scores'],
                            ax=ax,
                            title="Predictions")
# Log ground-truth shapes so they can be compared against the predictions.
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

# ## Color Splash
#
# This is for illustration. You can call `balloon.py` with the `splash` option to get better images without the black padding.

# In[10]:

# Keep color only inside the detected masks; grayscale elsewhere.
splash = balloon.color_splash(image, r['masks'])
display_images([splash], cols=1)

# ## Step by Step Prediction

# ## Stage 1: Region Proposal Network
예제 #11
0
    model.load_weights(model_path, by_name=True)

    ###########################################################################
    # Save the results to csv
    ###########################################################################
    point_to_csv_list = []

    for x in range(0, dataset_test.num_images):
        image = dataset_test.load_image(x)  # x is the image index
        category = dataset_test.image_info[x]['image_category']  # image category
        image_id = dataset_test.image_info[x]['id']

        results = model.detect_keypoint([image], verbose=0)

        r = results[0]  # for one image
        log("image", image)
        log("rois", r['rois'])
        log("keypoints", r['keypoints'])
        log("class_ids", r['class_ids'])
        # NOTE(review): duplicate of the keypoints log two lines up —
        # possibly meant r['scores'] or r['masks']; confirm intent.
        log("keypoints", r['keypoints'])
        # NOTE(review): error_count is reset every iteration, so it never
        # exceeds 1 — confirm whether it should accumulate across images.
        error_count = 0
        try:  # count the images where no target was detected
            key_points = keypoint_map_to24(r['keypoints'][0],
                                           fi_class_names[0])
        except:
            # Fall back to 24 zeroed keypoints when nothing was detected.
            key_points = np.array([[[0, 0, 0] for i in range(24)]])
            error_count += 1

        visualize.display_keypoints(image, r['rois'], r['keypoints'],
                                    r['class_ids'], dataset_test.class_names)
예제 #12
0
def segment_data_generation(mode, data_base_dir, use_edgelist=False, debug=False):
    """Run instance-segmentation inference and dump per-image .npz data.

    Args:
        mode: 'val', 'test', or 'both' (processes both splits).
        data_base_dir: Base directory holding the sketch data and colorMapC46.mat.
        use_edgelist: When True, refine predicted masks with edge-list results.
        debug: When True, visualize intermediate predictions.

    Side effects:
        Writes one <img_idx>_datas.npz per caption entry under
        outputs/inst_segm_output_data/<dataset_type>/.
    """
    if mode == 'both':
        dataset_types = ['val', 'test']
    else:
        dataset_types = [mode]

    caption_base_dir = 'data'
    outputs_base_dir = 'outputs'
    trained_model_dir = os.path.join(outputs_base_dir, 'snapshot')
    edgelist_result_dir = os.path.join(outputs_base_dir, 'edgelist')
    seg_data_save_base_dir = os.path.join(outputs_base_dir, 'inst_segm_output_data')
    epochs = '0100'
    model_path = os.path.join(trained_model_dir, 'mask_rcnn_sketchyscene_' + epochs + '.h5')

    # Class-name list: 'bg' plus the 46 categories from the color map.
    dataset_class_names = ['bg']
    color_map_mat_path = os.path.join(data_base_dir, 'colorMapC46.mat')
    colorMap = scipy.io.loadmat(color_map_mat_path)['colorMap']
    for i in range(46):
        cat_name = colorMap[i][0][0]
        dataset_class_names.append(cat_name)

    ROAD_LABEL = dataset_class_names.index('road')

    # Drawing order used in step 8 to sort instances (earlier groups first).
    CLASS_ORDERS = [[dataset_class_names.index('sun'), dataset_class_names.index('moon'),
                     dataset_class_names.index('star'), dataset_class_names.index('road')],
                    [dataset_class_names.index('tree')],
                    [dataset_class_names.index('cloud')],
                    [dataset_class_names.index('house')],
                    [dataset_class_names.index('bus'), dataset_class_names.index('car'),
                     dataset_class_names.index('truck')]]

    config = SkeSegConfig()
    model = modellib.MaskRCNN(mode="inference", config=config, model_dir='', log_dir='')

    assert model_path != "", "Provide path to trained weights"
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    for dataset_type in dataset_types:
        caption_json_path = os.path.join(caption_base_dir, 'sentence_instance_' + dataset_type + '.json')

        # Fix: the file handle was previously opened without ever being
        # closed (resource leak); json.load reads it directly.
        with open(caption_json_path, "r") as fp:
            json_data = json.load(fp)
        print('data_len', len(json_data))

        # val/test dataset
        dataset = SketchDataset(data_base_dir)
        dataset.load_sketches(dataset_type)
        dataset.prepare()

        split_seg_data_save_base_dir = os.path.join(seg_data_save_base_dir, dataset_type)
        os.makedirs(split_seg_data_save_base_dir, exist_ok=True)

        for data_idx in range(len(json_data)):
            img_idx = json_data[data_idx]['key']
            print('Processing', dataset_type, data_idx + 1, '/', len(json_data))

            original_image, _, gt_class_id, gt_bbox, gt_mask, _ = \
                modellib.load_image_gt(dataset, config, img_idx - 1, use_mini_mask=False)

            ## 1. inference
            results = model.detect([original_image])
            r = results[0]

            pred_boxes = r["rois"]  # (nRoIs, (y1, x1, y2, x2))
            pred_class_ids = r["class_ids"]  # (nRoIs)
            pred_scores = r["scores"]
            pred_masks = r["masks"]  # (768, 768, nRoIs)

            log("pred_boxes", pred_boxes)
            log("pred_class_ids", pred_class_ids)
            log("pred_masks", pred_masks)

            ## 2. Use original_image(768, 768, 3) {0, 255} to filter pred_masks
            if config.IGNORE_BG:
                # Zero out mask pixels falling on white (background) canvas.
                pred_masks = np.transpose(pred_masks, (2, 0, 1))  # (nRoIs, 768, 768)
                bin_input = original_image[:, :, 0] == 255
                pred_masks[:, bin_input[:, :]] = 0  # (nRoIs, 768, 768)
                pred_masks = np.transpose(pred_masks, (1, 2, 0))  # (768, 768, nRoIs)

            if debug:
                visualize.display_instances(original_image, pred_boxes, pred_masks, pred_class_ids,
                                            dataset.class_names, pred_scores, figsize=(8, 8))

            ## 3. refine pred_masks(768, 768, nRoIs) with edge-list
            if use_edgelist:
                pred_masks = \
                    refine_mask_with_edgelist(img_idx, dataset_type, data_base_dir, edgelist_result_dir,
                                              pred_masks.copy(), pred_boxes)

            ## 4. TODO: remove road prediction
            # pred_boxes = pred_boxes.tolist()
            # pred_masks = np.transpose(pred_masks, (2, 0, 1)).tolist()
            # pred_scores = pred_scores.tolist()
            # pred_class_ids = pred_class_ids.tolist()
            #
            # while ROAD_LABEL in pred_class_ids:
            #     road_idx = pred_class_ids.index(ROAD_LABEL)
            #     pred_boxes.remove(pred_boxes[road_idx])
            #     pred_masks.remove(pred_masks[road_idx])
            #     pred_scores.remove(pred_scores[road_idx])
            #     pred_class_ids.remove(ROAD_LABEL)

            ## 5. TODO: add road from semantic prediction
            # sem_label_base_path = '../../../../Sketch-Segmentation-TF/Segment-Sketch-DeepLab-v2/edge-list/pred_semantic_label_edgelist/'
            # sem_label_base_path = os.path.join(sem_label_base_path, dataset_type, 'mat')
            # sem_label_path = os.path.join(sem_label_base_path, 'L0_sample' + str(img_idx) + '.mat')
            # sem_label = scipy.io.loadmat(sem_label_path)['pred_label_edgelist']  # (750, 750), [0, 46]
            #
            # if ROAD_LABEL in sem_label:
            #     road_mask_img = np.zeros([sem_label.shape[0], sem_label.shape[1], 3], dtype=np.uint8)
            #     road_mask_img[sem_label == ROAD_LABEL] = [255, 255, 255]  # (750, 750, 3), {0, 255}
            #     road_mask_img = scipy.misc.imresize(
            #         road_mask_img, (config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM), interp='nearest')  # (768, 768, 3)
            #     road_mask = np.zeros(road_mask_img[:, :, 0].shape, dtype=np.uint8)
            #     road_mask[road_mask_img[:, :, 0] == 255] = 1  # (768, 768), {0, 1}
            #     # plt.imshow(road_mask)
            #     # plt.show()
            #
            #     road_bbox = utils.extract_bboxes(np.expand_dims(road_mask, axis=2))  # [num_instances, (y1, x1, y2, x2)]
            #     road_bbox = road_bbox[0]
            #     pred_boxes.append(road_bbox)
            #     pred_masks.append(road_mask)
            #     pred_scores.append(1.)
            #     pred_class_ids.append(ROAD_LABEL)

            # pred_boxes = np.array(pred_boxes, dtype=np.int32)
            # pred_class_ids = np.array(pred_class_ids, dtype=np.int32)
            # pred_scores = np.array(pred_scores, dtype=np.float32)
            # pred_masks = np.array(pred_masks, dtype=np.uint8)
            # pred_masks = np.transpose(pred_masks, [1, 2, 0])  # (768, 768, nRoIs?)

            if debug:
                visualize.display_instances(original_image, pred_boxes, pred_masks, pred_class_ids,
                                            dataset.class_names, pred_scores, figsize=(8, 8))

            ## 8. sort instances: CLASS_ORDERS groups first, then the rest.
            instance_sorted_index = []

            for order_idx in range(len(CLASS_ORDERS)):
                order_ids = CLASS_ORDERS[order_idx]
                for cate_idx in range(pred_class_ids.shape[0]):
                    if pred_class_ids[cate_idx] in order_ids:
                        instance_sorted_index.append(cate_idx)

            for cate_idx in range(pred_class_ids.shape[0]):
                if cate_idx not in instance_sorted_index:
                    instance_sorted_index.append(cate_idx)

            # print('pred_class_ids', pred_class_ids)
            # print('instance_sorted_index', instance_sorted_index)
            assert len(instance_sorted_index) == pred_class_ids.shape[0]

            pred_class_ids_list = []
            pred_masks_list = []
            pred_boxes_list = []

            for cate_idx_i in range(len(instance_sorted_index)):
                pred_class_ids_list.append(pred_class_ids[instance_sorted_index[cate_idx_i]])

                # Crop each mask to its (inclusive) bounding box.
                pred_box = pred_boxes[instance_sorted_index[cate_idx_i]]
                y1, x1, y2, x2 = pred_box
                pred_mask_large = pred_masks[:, :, instance_sorted_index[cate_idx_i]]
                pred_mask = pred_mask_large[y1: y2 + 1, x1: x2 + 1]

                pred_masks_list.append(pred_mask)
                pred_boxes_list.append(pred_box)

            # print('pred_class_ids_list', pred_class_ids_list)
            assert len(pred_class_ids_list) == pred_class_ids.shape[0]

            ## 9. generate .npz data
            npz_name = os.path.join(split_seg_data_save_base_dir, str(img_idx) + '_datas.npz')
            np.savez(npz_name, pred_class_ids=pred_class_ids_list, pred_masks=pred_masks_list,
                     pred_boxes=pred_boxes_list)
예제 #13
0
    def GET(self):
        """Open the door for a logged-in user, then redirect to /door.

        Fix: the original body mixed tabs and spaces, which raises a
        TabError under Python 3; indentation normalized to spaces.
        """
        # Only trigger the opener when the session is authenticated.
        if session.login == 1:
            model.log(session.user, 0)
            # Launch the external door-opening command.
            Popen(["opendoor"])
        web.seeother('/door')
예제 #14
0
def results(course_name=None, lno=None, slide_name=None, curr_slide=None):
    """Log the search query and render the search results page.

    Args:
        course_name, lno, slide_name, curr_slide: Route parameters kept for
            URL compatibility; not used by this handler.

    Returns:
        The rendered search results for the submitted query.
    """
    search = forms.SearchForm(request.form)
    #if request.method == 'POST':
    # Record the query: client address, query text, timestamp, event type.
    model.log(request.remote_addr, request.form['search'],
              datetime.datetime.now(), 'search_query')
    return search_results(search)
    # Fix: ~30 lines of unreachable Mask R-CNN evaluation code that followed
    # this return (pasted from an unrelated snippet) have been removed.
예제 #16
0
    # Resize to the model's square input size; aspect ratio is preserved
    # and zero padding fills the remainder.
    image, window, scale, padding = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        padding=config.IMAGE_PADDING)
    mask = utils.resize_mask(mask, scale, padding)
    # Recompute bounding boxes from the resized masks.
    bbox = utils.extract_bboxes(mask)

    for box in bbox:
        print(box, "box")

    # Display image and additional stats
    print("image_id: ", image_id, dataset.image_reference(image_id))
    print("Original shape: ", original_shape)
    log("image", image)
    log("mask", mask)
    log("class_ids", class_ids)
    log("bbox", bbox)
    # Display image and instances (clear and reuse the axes if present).
    if ax:
        ax.clear()
    ax = visualize.display_instances(image,
                                     bbox,
                                     mask,
                                     class_ids,
                                     dataset.class_names,
                                     ax=ax)
    # Block until the user confirms before showing the next sample.
    input("next?")
예제 #17
0
파일: api.py 프로젝트: MrMugiwara/faraday
def _setUpAPIServer(hostname=None, port=None):
    """Create the global XML-RPC API server, probing ports until one binds.

    Defaults hostname to "localhost" and port to 9876 when unset.  On
    "address already in use" (errno 98) the port is incremented and the
    bind retried, up to port 65535.
    """
    global _xmlrpc_api_server
    global api_conn_info
    if _xmlrpc_api_server is None:
        #TODO: some way to get defaults.. from config?
        # str() comparison also catches the literal string "None" from config.
        if str(hostname) == "None":
            hostname = "localhost"
        if str(port) == "None":
            port = 9876

        if CONF.getApiConInfo() is None:
            CONF.setApiConInfo(hostname, port)
        devlog("starting XMLRPCServer with api_conn_info = %s" % str(CONF.getApiConInfo()))

        while True:

            try:
                _xmlrpc_api_server = model.common.XMLRPCServer(CONF.getApiConInfo())
                # Registers the XML-RPC introspection functions system.listMethods, system.methodHelp and system.methodSignature.
                _xmlrpc_api_server.register_introspection_functions()

                # register a function to nicely stop server
                _xmlrpc_api_server.register_function(_xmlrpc_api_server.stop_server)

                # register all the api functions to be exposed by the server
                _xmlrpc_api_server.register_function(createAndAddHost)
                _xmlrpc_api_server.register_function(createAndAddInterface)
                _xmlrpc_api_server.register_function(createAndAddServiceToInterface)
                _xmlrpc_api_server.register_function(createAndAddNoteToService)
                _xmlrpc_api_server.register_function(createAndAddNoteToHost)
                _xmlrpc_api_server.register_function(createAndAddNoteToNote)
                _xmlrpc_api_server.register_function(createAndAddVulnWebToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToHost)
                _xmlrpc_api_server.register_function(addHost)
                _xmlrpc_api_server.register_function(addInterface)
                _xmlrpc_api_server.register_function(addServiceToInterface)
                _xmlrpc_api_server.register_function(newHost)
                _xmlrpc_api_server.register_function(newInterface)
                _xmlrpc_api_server.register_function(newService)
                _xmlrpc_api_server.register_function(devlog)

                #TODO: check if all necessary APIs are registered here!!

                getLogger().info(
                    "XMLRPC API server configured on %s" % str(
                        CONF.getApiConInfo()))
                break
            except socket.error as exception:
                if exception.errno == 98:
                    # Port already in use
                    # Let's try the next one
                    port += 1
                    if port > 65535:
                        raise Exception("No ports available!")
                    CONF.setApiConInfo(hostname, port)
                    CONF.saveConfig()
                else:
                    raise exception
            except Exception as e:
                # NOTE(review): a persistent non-socket error is only logged
                # here, so the while-True loop would retry forever with the
                # same configuration — confirm whether this should re-raise.
                msg = "There was an error creating the XMLRPC API Server:\n%s" % str(e)
                log(msg)
                devlog("[ERROR] - %s" % msg)
예제 #18
0
# Live keypoint detection from the default webcam; press 'q' to quit.
cap = cv2.VideoCapture(0)
try:
    while True:
        # get a frame
        ret, frame = cap.read()
        # OpenCV delivers BGR; the model expects RGB, so reverse the channels.
        rgb_frame = frame[:, :, ::-1]
        print(np.shape(frame))
        # Run detection and report the achieved frames per second.
        t = time.time()
        results = model.detect_keypoint([rgb_frame], verbose=0)
        t = time.time() - t
        print(1.0 / t)
        r = results[0]  # for one image
        log("rois", r['rois'])
        log("keypoints", r['keypoints'])  # fix: was logged twice in the original
        log("class_ids", r['class_ids'])
        log("masks", r['masks'])
        log("scores", r['scores'])
        result_image = display_keypoints(frame,
                                         r['rois'],
                                         r['keypoints'],
                                         r['class_ids'],
                                         class_names,
                                         skeleton=inference_config.LIMBS)

        cv2.imshow('Detect image', result_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Fix: release the camera and close the preview window on exit —
    # the original leaked both.
    cap.release()
    cv2.destroyAllWindows()
예제 #19
0
파일: api.py 프로젝트: mlyzhong/faraday
def _setUpAPIServer(hostname=None, port=None):
    """Lazily create and configure the module-global XML-RPC API server.

    Idempotent: does nothing if ``_xmlrpc_api_server`` already exists.
    Binds to the host/port stored in ``CONF`` (falling back to the given
    arguments, then to localhost:9876), registers all API functions, and
    retries on successive ports when the chosen one is already in use.
    """
    global _xmlrpc_api_server
    global api_conn_info
    if _xmlrpc_api_server is None:
        #TODO: some way to get defaults.. from config?
        # str(x) == "None" also catches the string "None", not just the
        # None object — presumably values may arrive as strings from config.
        if str(hostname) == "None":
            hostname = "localhost"
        if str(port) == "None":
            port = 9876

        # Persist the effective connection info only if none was configured.
        if CONF.getApiConInfo() is None:
            CONF.setApiConInfo(hostname, port)
        devlog("starting XMLRPCServer with api_conn_info = %s" %
               str(CONF.getApiConInfo()))

        while True:

            try:
                _xmlrpc_api_server = model.common.XMLRPCServer(
                    CONF.getApiConInfo())
                # Registers the XML-RPC introspection functions system.listMethods, system.methodHelp and system.methodSignature.
                _xmlrpc_api_server.register_introspection_functions()

                # register a function to nicely stop server
                _xmlrpc_api_server.register_function(
                    _xmlrpc_api_server.stop_server)

                # register all the api functions to be exposed by the server
                _xmlrpc_api_server.register_function(createAndAddHost)
                _xmlrpc_api_server.register_function(createAndAddInterface)
                _xmlrpc_api_server.register_function(
                    createAndAddServiceToInterface)
                _xmlrpc_api_server.register_function(createAndAddNoteToService)
                _xmlrpc_api_server.register_function(createAndAddNoteToHost)
                _xmlrpc_api_server.register_function(createAndAddNoteToNote)
                _xmlrpc_api_server.register_function(
                    createAndAddVulnWebToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToService)
                _xmlrpc_api_server.register_function(createAndAddVulnToHost)
                _xmlrpc_api_server.register_function(addHost)
                _xmlrpc_api_server.register_function(addInterface)
                _xmlrpc_api_server.register_function(addServiceToInterface)
                _xmlrpc_api_server.register_function(newHost)
                _xmlrpc_api_server.register_function(newInterface)
                _xmlrpc_api_server.register_function(newService)
                _xmlrpc_api_server.register_function(devlog)

                #TODO: check if all necessary APIs are registered here!!

                getLogger().info("XMLRPC API server configured on %s" %
                                 str(CONF.getApiConInfo()))
                break
            except socket.error as exception:
                # NOTE(review): magic number — 98 is EADDRINUSE on Linux;
                # prefer errno.EADDRINUSE, but the two branches below treat
                # the Linux and macOS values differently, so confirm intent.
                if exception.errno == 98:
                    # Port already in use
                    # Let's try the next one
                    port += 1
                    if port > 65535:
                        raise Exception("No ports available!")
                    CONF.setApiConInfo(hostname, port)
                    CONF.saveConfig()
                elif exception.errno == 48:
                    # Address already open
                    # Another instance of faraday.py already running
                    # NOTE(review): 48 looks like EADDRINUSE on macOS — verify.
                    raise Exception(
                        "Another instance of faraday.py already running!")
                else:
                    # NOTE(review): `raise exception` re-raises but a bare
                    # `raise` would preserve the original traceback.
                    raise exception
            except Exception as e:
                # NOTE(review): this broad handler only logs and then the
                # `while True` loop retries with the same configuration —
                # a non-socket error here makes the loop spin forever.
                msg = "There was an error creating the XMLRPC API Server:\n%s" % str(
                    e)
                log(msg)
                devlog("[ERROR] - %s" % msg)
예제 #20
0
def segment_inference(**kwargs):
    """Run instance-segmentation inference on a single sketch image.

    Loads a trained Mask R-CNN snapshot, predicts instance masks for one
    image of the chosen dataset split, computes mAP over the COCO IoU
    thresholds against the ground truth, and saves visualizations of both
    the ground truth and the predictions.

    Keyword Args:
        data_base_dir (str): root directory of the sketch dataset.
        dataset_type (str): dataset split to load (e.g. 'val' or 'test').
        image_id (int): 1-based index of the image within the split.
        epochs (str): epoch tag used to locate the snapshot .h5 file.
        use_edgelist (bool): if True, additionally refine the predicted
            masks with precomputed edge-lists and evaluate/visualize that
            refined result as well.
    """
    data_base_dir = kwargs['data_base_dir']
    dataset_type = kwargs['dataset_type']
    image_id = kwargs['image_id']
    epochs = kwargs['epochs']
    use_edgelist = kwargs['use_edgelist']

    outputs_base_dir = 'outputs'
    vis_result_save_dir = os.path.join(outputs_base_dir, 'visual_result',
                                       dataset_type)
    trained_model_dir = os.path.join(outputs_base_dir, 'snapshot')
    edgelist_result_dir = os.path.join(outputs_base_dir, 'edgelist')
    model_path = os.path.join(trained_model_dir,
                              'mask_rcnn_sketchyscene_' + epochs + '.h5')

    os.makedirs(vis_result_save_dir, exist_ok=True)

    config = SketchInferConfig()
    config.display()

    # val/test dataset
    dataset_infer = SketchDataset(data_base_dir)
    dataset_infer.load_sketches(dataset_type)
    dataset_infer.prepare()

    # Recreate the model in inference mode
    model = modellib.MaskRCNN(mode="inference",
                              config=config,
                              model_dir='',
                              log_dir='')

    # Load trained weights (fill in path to trained weights here)
    assert model_path != "", "Provide path to trained weights"
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    # image_id is 1-based for callers; the dataset index is 0-based.
    original_image, _, gt_class_id, gt_bbox, gt_mask, _ = \
        modellib.load_image_gt(dataset_infer, config, image_id - 1, use_mini_mask=False)

    log("original_image", original_image)
    log("gt_class_id", gt_class_id)
    log("gt_bbox", gt_bbox)
    log("gt_mask", gt_mask)

    gt_seg_path = os.path.join(vis_result_save_dir, str(image_id) + '_gt.png')
    visualize.display_instances(original_image,
                                gt_bbox,
                                gt_mask,
                                gt_class_id,
                                dataset_infer.class_names,
                                title='Ground-Truth',
                                save_path=gt_seg_path,
                                fix_color=True)

    ## inference
    results = model.detect([original_image], verbose=1)
    r = results[0]

    pred_boxes = r["rois"]  # (nRoIs, (y1, x1, y2, x2))
    pred_class_ids = r["class_ids"]  # (nRoIs)
    pred_scores = r["scores"]  # (nRoIs)
    pred_masks = r["masks"]  # (768, 768, nRoIs)
    log("pred_boxes", pred_boxes)
    log("pred_masks", pred_masks)

    if config.IGNORE_BG:
        # Use original_image(768, 768, 3) {0, 255} to filter pred_masks:
        # white (background) pixels of the input sketch are zeroed in every mask.
        pred_masks = np.transpose(pred_masks, (2, 0, 1))  # (nRoIs, 768, 768)
        bin_input = original_image[:, :, 0] == 255
        pred_masks[:, bin_input[:, :]] = 0  # (nRoIs, 768, 768)
        pred_masks = np.transpose(pred_masks, (1, 2, 0))  # (768, 768, nRoIs)

    # refine pred_masks(768, 768, nRoIs) with edge-list
    if use_edgelist:
        refined_pred_masks = \
            refine_mask_with_edgelist(image_id, dataset_type, data_base_dir, edgelist_result_dir,
                                      pred_masks.copy(), pred_boxes)

    # Calculating AP over the COCO IoU thresholds 0.50:0.05:0.95.
    # Fixed: `num` must be an integer — np.round returns a float and recent
    # NumPy raises TypeError for a non-integer `num`, so cast explicitly.
    iou_thresholds = np.linspace(.5,
                                 0.95,
                                 int(np.round((0.95 - .5) / .05)) + 1,
                                 endpoint=True)
    APs = np.zeros([len(iou_thresholds)], dtype=np.float32)
    APs_edg = np.zeros([len(iou_thresholds)], dtype=np.float32)
    for i in range(len(iou_thresholds)):
        iouThr = iou_thresholds[i]
        AP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             pred_boxes, pred_class_ids, pred_scores, pred_masks,
                             iou_threshold=iouThr)
        APs[i] = AP

        if use_edgelist:
            AP_edg, precisions, recalls, overlaps = \
                utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 pred_boxes, pred_class_ids, pred_scores, refined_pred_masks,
                                 iou_threshold=iouThr)
            APs_edg[i] = AP_edg

    mAP = np.mean(APs)
    mAP_edg = np.mean(APs_edg)  # stays 0 when use_edgelist is False
    print('APs', APs)
    print('mAP', mAP)
    print('APs_edg', APs_edg)
    print('mAP_edg', mAP_edg)

    # save visual results
    visual_seg_path = os.path.join(vis_result_save_dir,
                                   str(image_id) + '_seg.png')
    visualize.display_instances(original_image,
                                pred_boxes,
                                pred_masks,
                                pred_class_ids,
                                dataset_infer.class_names,
                                pred_scores,
                                title='Normal result',
                                save_path=visual_seg_path,
                                fix_color=True)

    if use_edgelist:
        visual_seg_edg_path = os.path.join(vis_result_save_dir,
                                           str(image_id) + '_seg_edgelist.png')
        visualize.display_instances(original_image,
                                    pred_boxes,
                                    refined_pred_masks,
                                    pred_class_ids,
                                    dataset_infer.class_names,
                                    pred_scores,
                                    title='Result with edgelist',
                                    save_path=visual_seg_edg_path,
                                    fix_color=True)
예제 #21
0
def exp_raw(dtype):
  """Build and train the raw perspective-regression network.

  Constructs a small VGG-like convolutional stack followed by three dense
  layers that regress the 8 perspective-transform parameters, trains it
  for 10 epochs with Adam on an RMSE loss, and returns the compiled
  training function.

  Args:
    dtype: theano float dtype used while building the dense (perspective)
      head; restored to 'float32' afterwards.

  Returns:
    The compiled theano training function ``ftrain(input, psp)`` that
    returns ``[loss, predictions]`` and applies one update step.
  """
  shp = (None, 3, 256, 256)
  input_var = T.tensor4('input_var', dtype = 'float32')
  psp = T.dmatrix("psp")
  network = OrderedDict()
  network['input'] = lasagne.layers.InputLayer(shape = shp, input_var = input_var)
  # network = make_vgg16(network, 'model/vgg16_weights_from_caffe.h5')
  # First conv and segmentation part
  network['conv1_1'] = lasagne.layers.Conv2DLayer(network['input'],
    num_filters = 64, filter_size = (3, 3),nonlinearity = lasagne.nonlinearities.rectify,
    W=lasagne.init.GlorotUniform())
  network['conv1_2'] = lasagne.layers.Conv2DLayer(network['conv1_1'],
    num_filters = 64, filter_size = (3, 3), nonlinearity = lasagne.nonlinearities.rectify)
  network['pool1_1'] = lasagne.layers.MaxPool2DLayer(network['conv1_2'], pool_size = (2, 2))
  network['norm1_1'] = lasagne.layers.BatchNormLayer(network['pool1_1'])

  network['conv1_3'] = lasagne.layers.Conv2DLayer(network['norm1_1'],
    num_filters = 128, filter_size = (3, 3), nonlinearity = lasagne.nonlinearities.rectify)
  network['conv1_4'] = lasagne.layers.Conv2DLayer(network['conv1_3'],
    num_filters = 128, filter_size = (3, 3), nonlinearity = lasagne.nonlinearities.rectify)
  network['pool1_2'] = lasagne.layers.MaxPool2DLayer(network['conv1_4'], pool_size = (2, 2))
  network['norm1_2'] = lasagne.layers.BatchNormLayer(network['pool1_2'])

  network['conv1_5'] = lasagne.layers.Conv2DLayer(network['norm1_2'],
    num_filters = 256, filter_size = (3, 3), nonlinearity = lasagne.nonlinearities.rectify)
  network['pool1_3'] = lasagne.layers.MaxPool2DLayer(network['conv1_5'], pool_size = (2, 2))

  network['conv1_6'] = lasagne.layers.Conv2DLayer(network['pool1_3'],
    num_filters = 256, filter_size = (3, 3), nonlinearity = lasagne.nonlinearities.rectify)
  network['pool1_4'] = lasagne.layers.MaxPool2DLayer(network['conv1_6'], pool_size = (2, 2))

  # Perspective Transform head: dense layers built under the requested dtype
  network['norm2'] = lasagne.layers.BatchNormLayer(network['pool1_4'])
  # network['cast'] = CastingLayer(network['norm2'], dtype)
  theano.config.floatX = dtype
  network['pfc2_1'] = lasagne.layers.DenseLayer(
    lasagne.layers.dropout(network['norm2'], p = 0.05),
    num_units = 1024, nonlinearity = lasagne.nonlinearities.rectify)
  network['pfc2_2'] = lasagne.layers.DenseLayer(
    lasagne.layers.dropout(network['pfc2_1'], p=0.05),
    num_units = 1024, nonlinearity = lasagne.nonlinearities.rectify)
  network['pfc2_3'] = lasagne.layers.DenseLayer(
    lasagne.layers.dropout(network['pfc2_2'], p=0.05),
    num_units = 1024, nonlinearity = lasagne.nonlinearities.rectify)
  # loss target 2: the 8 perspective parameters
  network['pfc_out'] = lasagne.layers.DenseLayer(
    lasagne.layers.dropout(network['pfc2_3'], p = 0.05),
    num_units = 8, nonlinearity = lasagne.nonlinearities.rectify)
  theano.config.floatX = 'float32'

  # RMSE loss between prediction and ground-truth perspective parameters
  predict = lasagne.layers.get_output(network['pfc_out'])
  loss = T.sqrt(lasagne.objectives.squared_error(predict, psp).mean())
  paras = lasagne.layers.get_all_params(network['pfc_out'], trainable = True)
  updates = adam(loss, paras, [theano.shared(np.float32(0.0001)) for i in range(len(paras))])
  ftrain = theano.function([input_var, psp], [loss, predict], updates = updates)

  def get_inputs(meta, batch, path):
    """Load one minibatch: (input images, scaled perspective params)."""
    # batchidx = [keys[i] for i in batch]
    input = np.array([read_image(path + 'patch/' + idx + '.jpg', shape = (256, 256))
      for idx in batch]).astype(np.float32)
    # NOTE(review): `seg` is loaded but never used below — dead work unless
    # the pmask read is intended as a file-existence check; confirm.
    seg = np.array([read_image(path + 'pmask/' + idx + '.jpg', shape = (256, 256))
      for idx in batch]).astype(np.float32)
    dat = [meta[key] for key in batch]
    Ps = np.array([np.array(dat[i][0]).flatten()[0 : 8] for i in range(len(batch))])
    for P in Ps:
      # rescale the last two (translation-like) parameters into a usable range
      P[6 : 8] = (P[6 : 8] + 1e-3) * 1e4
    return input, Ps

  path = '/home/yancz/text_generator/data/real/'
  dat, meta = load_data(path, 10000, False)
  for epoch in range(10):
    loss = 0
    trs = 0
    for batch in iterate_minibatch(dat['train'], 32, len(dat['train'])):
      inputs = get_inputs(meta, batch, path)
      batch_loss, valp = ftrain(*inputs)
      log(batch_loss)
      print(valp)
      loss += batch_loss
      trs += 1
    loss /= trs
    # Fixed: log the per-epoch average `loss`; the original printed `l`,
    # the loss of only the last minibatch, making the average dead code.
    log('loss ' + str(epoch) + ' ' + str(loss))
  return ftrain
예제 #22
0
                                  config=inference_config,
                                  model_dir="./model")

        # Get path to saved weights
        # Either set a specific path or find last trained weights
        # model_path = os.path.join(ROOT_DIR, ".h5 file name here")
        # model_path = model.find_last()
        model.load_weights(model_path, by_name=True)
        iid = np.random.choice(Validate.image_ids)
        original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(Validate, inference_config,
                            iid, use_mini_mask=False)

        results = model.detect([original_image], verbose=1)
        r = results[0]
        log("original_image", original_image)
        log("image_meta", image_meta)
        print(gt_class_id)
        if len(r["class_ids"]) == 0 or len(gt_class_id) == 0: pass
        visualize.display_instances(original_image,
                                    gt_bbox,
                                    gt_mask,
                                    gt_class_id,
                                    Train.class_names,
                                    figsize=(8, 8))
        visualize.display_instances(original_image,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'],
                                    Train.class_names,
                                    r['scores'],