Example #1
def frcn_predictor(features, rois, n_classes, model_path):
    # Load the pretrained classification net and find nodes
    # (the *_node_name globals and roi_dim are module-level constants in the sample)
    loaded_model = load_model(model_path)
    feature_node = find_by_name(loaded_model, feature_node_name)
    conv_node = find_by_name(loaded_model, last_conv_node_name)
    pool_node = find_by_name(loaded_model, pool_node_name)
    last_node = find_by_name(loaded_model, last_hidden_node_name)

    # Clone the conv layers and the fully connected layers of the network
    conv_layers = combine([conv_node.owner]).clone(CloneMethod.freeze, {feature_node: placeholder()})
    fc_layers = combine([last_node.owner]).clone(CloneMethod.clone, {pool_node: placeholder()})

    # Create the Fast R-CNN model
    feat_norm = features - Constant(114)
    conv_out = conv_layers(feat_norm)
    roi_out = roipooling(conv_out, rois, C.MAX_POOLING, (roi_dim, roi_dim),
                         0.0625)
    fc_out = fc_layers(roi_out)

    # z = Dense(rois[0], num_classes, map_rank=1)(fc_out)  # --> map_rank=1 is not yet supported
    W = parameter(shape=(4096, n_classes), init=glorot_uniform())
    b = parameter(shape=n_classes, init=0)
    z = times(fc_out, W) + b

    return z
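
For context, a minimal sketch of how this predictor might be wired up; the input shapes, class count, and model path below are assumptions (following CNTK's AlexNet-based Fast R-CNN sample), not part of the snippet:

import cntk as C

num_rois = 100  # assumed number of ROIs per image
image_input = C.input_variable((3, 227, 227), name='features')  # AlexNet-sized input (assumed)
roi_input = C.input_variable((num_rois, 4), name='rois')        # one (x, y, w, h) per ROI
z = frcn_predictor(image_input, roi_input, n_classes=21, model_path='AlexNet.model')  # hypothetical path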
Example #2
def frcn_predictor(features, rois, n_classes, model_path):
    # Load the pretrained classification net and find nodes
    loaded_model = load_model(model_path)
    feature_node = find_by_name(loaded_model, feature_node_name)
    conv_node    = find_by_name(loaded_model, last_conv_node_name)
    pool_node    = find_by_name(loaded_model, pool_node_name)
    last_node    = find_by_name(loaded_model, last_hidden_node_name)

    # Clone the conv layers and the fully connected layers of the network
    conv_layers = combine([conv_node.owner]).clone(CloneMethod.freeze, {feature_node: placeholder()})
    fc_layers = combine([last_node.owner]).clone(CloneMethod.clone, {pool_node: placeholder()})

    # Create the Fast R-CNN model
    feat_norm = features - Constant(114)
    conv_out  = conv_layers(feat_norm)
    roi_out   = roipooling(conv_out, rois, (roi_dim, roi_dim))
    fc_out    = fc_layers(roi_out)

    # z = Dense(rois[0], num_classes, map_rank=1)(fc_out)  # --> map_rank=1 is not yet supported
    W = parameter(shape=(4096, n_classes), init=glorot_uniform())
    b = parameter(shape=n_classes, init=0)
    z = times(fc_out, W) + b

    return z
Example #3
def create_model(base_model_file,
                 input_features,
                 num_classes,
                 dropout_rate=0.5,
                 freeze_weights=False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    #graph.plot(base_model, filename="base_model.pdf") # Write graph visualization

    # Clone the model until right before the pooling layer, i.e. up to and including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None,
                       name="prediction")(avgPoolDrop)
    return finalModel
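
A hedged usage sketch for this transfer-learning helper; the model file name and input shape are illustrative assumptions (the node name "z.x.x.r" implies a CNTK ResNet model):

import cntk as C

input_features = C.input_variable((3, 224, 224), name='features')  # ResNet-sized input (assumed)
model = create_model('ResNet18_ImageNet_CNTK.model', input_features,
                     num_classes=10, dropout_rate=0.5, freeze_weights=True)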
Example #4
def frcn_predictor(features, rois, n_classes, base_path):
    # model specific variables for AlexNet
    model_file = base_path + "/../../../resources/cntk/AlexNet.model"
    roi_dim = 6
    feature_node_name = "features"
    last_conv_node_name = "conv5.y"
    pool_node_name = "pool3"
    last_hidden_node_name = "h2_d"

    # Load the pretrained classification net and find nodes
    print("Loading pre-trained model...")
    loaded_model = load_model(model_file)
    print("Loading pre-trained model... DONE.")
    feature_node = find_by_name(loaded_model, feature_node_name)
    conv_node    = find_by_name(loaded_model, last_conv_node_name)
    pool_node    = find_by_name(loaded_model, pool_node_name)
    last_node    = find_by_name(loaded_model, last_hidden_node_name)

    # Clone the conv layers and the fully connected layers of the network
    conv_layers = combine([conv_node.owner]).clone(CloneMethod.freeze, {feature_node: placeholder()})
    fc_layers   = combine([last_node.owner]).clone(CloneMethod.clone,  {pool_node: placeholder()})

    # Create the Fast R-CNN model
    feat_norm = features - constant(114)
    conv_out  = conv_layers(feat_norm)
    roi_out   = roipooling(conv_out, rois, (roi_dim, roi_dim))
    fc_out    = fc_layers(roi_out)
    #fc_out.set_name("fc_out")

    # z = Dense(rois[0], num_classes, map_rank=1)(fc_out)  # --> map_rank=1 is not yet supported
    W = parameter(shape=(4096, n_classes), init=glorot_uniform())
    b = parameter(shape=n_classes, init=0)
    z = times(fc_out, W) + b
    return z, fc_out
Example #5
def clone_model(base_model, from_node_names, to_node_names, clone_method):
    from_nodes = [find_by_name(base_model, node_name) for node_name in from_node_names]
    if None in from_nodes:
        print("Error: could not find all specified 'from_nodes' in clone. Looking for {}, found {}"
              .format(from_node_names, from_nodes))
    to_nodes = [find_by_name(base_model, node_name) for node_name in to_node_names]
    if None in to_nodes:
        print("Error: could not find all specified 'to_nodes' in clone. Looking for {}, found {}"
              .format(to_node_names, to_nodes))
    input_placeholders = dict(zip(from_nodes, [placeholder() for x in from_nodes]))
    cloned_net = combine(to_nodes).clone(clone_method, input_placeholders)
    return cloned_net
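
As an illustration, cloning the frozen convolutional part of a pretrained network between two named nodes could look like this; the model path and node names are assumptions (matching the AlexNet node names used in Example #4):

from cntk import load_model, CloneMethod

base_model = load_model('AlexNet.model')  # hypothetical path
conv_layers = clone_model(base_model, ['features'], ['conv5.y'], CloneMethod.freeze)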
Example #6
def clone_model(base_model, from_node_names, to_node_names, clone_method):
    from_nodes = [find_by_name(base_model, node_name) for node_name in from_node_names]
    if None in from_nodes:
        print("Error: could not find all specified 'from_nodes' in clone. Looking for {}, found {}"
              .format(from_node_names, from_nodes))
    to_nodes = [find_by_name(base_model, node_name) for node_name in to_node_names]
    if None in to_nodes:
        print("Error: could not find all specified 'to_nodes' in clone. Looking for {}, found {}"
              .format(to_node_names, to_nodes))
    input_placeholders = dict(zip(from_nodes, [placeholder() for x in from_nodes]))
    cloned_net = combine(to_nodes).clone(clone_method, input_placeholders)
    return cloned_net
Example #7
def modify_model(features, n_classes):
    # Load the pretrained model (model_file is a module-level path) and clone all layers
    loaded_model = load_model(model_file)
    feature_node = find_by_name(loaded_model, 'features')
    last_node = find_by_name(loaded_model, 'h2_d')
    all_layers = combine([last_node.owner]).clone(CloneMethod.freeze, {feature_node: placeholder()})

    feat_norm = features - Constant(114)
    fc_out = all_layers(feat_norm)
    z = Dense(n_classes)(fc_out)

    return z
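
A minimal call sketch, assuming model_file points at a pretrained AlexNet whose graph contains the 'features' and 'h2_d' nodes:

import cntk as C

features = C.input_variable((3, 227, 227), name='features')  # AlexNet-sized input (assumed)
z = modify_model(features, n_classes=10)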
Example #8
    def load_model(self):
        if self.__model:
            raise Exception("Model already loaded")

        trained_frcnn_model = load_model(self.__model_path)

        # cache indices of the model arguments
        args_indices = {}
        for i, arg in enumerate(trained_frcnn_model.arguments):
            args_indices[arg.name] = i

        self.__nr_rois = trained_frcnn_model.arguments[
            args_indices["rois"]].shape[0]
        self.__resize_width = trained_frcnn_model.arguments[
            args_indices["features"]].shape[1]
        self.__resize_height = trained_frcnn_model.arguments[
            args_indices["features"]].shape[2]
        self.labels_count = trained_frcnn_model.arguments[
            args_indices["roiLabels"]].shape[1]

        # Next, we clone the model and create input nodes just for the features (image) and ROIs.
        # This makes sure that only the calculations needed for evaluating images are performed
        # at test time.
        #
        # find the original features and rois input nodes
        features_node = find_by_name(trained_frcnn_model, "features")
        rois_node = find_by_name(trained_frcnn_model, "rois")

        #  find the output "z" node
        z_node = find_by_name(trained_frcnn_model, 'z')

        # define new input nodes for the features (image) and rois
        image_input = input_variable(features_node.shape, name='features')
        roi_input = input_variable(rois_node.shape, name='rois')

        # Clone the desired layers with fixed weights and place holder for the new input nodes
        cloned_nodes = combine([z_node.owner]).clone(
            CloneMethod.freeze, {
                features_node: placeholder(name='features'),
                rois_node: placeholder(name='rois')
            })

        # apply the cloned nodes to the input nodes to obtain the model for evaluation
        self.__model = cloned_nodes(image_input, roi_input)

        # cache the indices of the input nodes
        self.__args_indices = {}

        for i, arg in enumerate(self.__model.arguments):
            self.__args_indices[arg.name] = i
Example #9
def runCntkModel(model, map_file, node_name=None, mb_size=1):
    # Get minibatch source (create_mb_source, readTable and getColumn are project-specific helpers)
    num_classes = model.shape[0]
    (image_width, image_height) = find_by_name(model, "input").shape[1:]
    minibatch_source = create_mb_source(map_file, image_width, image_height, 3,
                                        num_classes, False)
    features_si = minibatch_source['features']

    # Set output node
    if not node_name:
        output_node = model
    else:
        node_in_graph = model.find_by_name(node_name)
        output_node = combine([node_in_graph.owner])

    # Evaluate DNN for all images
    feats = []
    sample_counts = 0
    imgPaths = getColumn(readTable(map_file), 0)
    while sample_counts < len(imgPaths):
        sample_count = min(mb_size, len(imgPaths) - sample_counts)
        mb = minibatch_source.next_minibatch(sample_count)
        output = output_node.eval(mb[features_si])
        feats += [o.flatten() for o in output]
        sample_counts += sample_count
        if sample_counts % 100 < mb_size:
            print(
                "Evaluating DNN (output dimension = {}) for image {} of {}: {}"
                .format(len(feats[-1]), sample_counts, len(imgPaths),
                        imgPaths[sample_counts - 1]))
    data = [[imgPath, feat] for imgPath, feat in zip(imgPaths, feats)]
    return data
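
Assuming a trained model whose graph has an "input" node and a CNTK map file (one "path<TAB>label" line per image), a call might look like the following; the file names and the node name are placeholders:

from cntk import load_model

model = load_model('trained_classifier.model')  # hypothetical path
data = runCntkModel(model, 'dataset_map.txt', node_name='poolingLayer')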
Example #10
def create_model(base_model_file, feature_node_name, last_hidden_node_name, num_classes, input_features, freeze=False):
    # Load the pretrained classification net and find nodes
    base_model   = load_model(base_model_file)
    feature_node = find_by_name(base_model, feature_node_name)
    last_node    = find_by_name(base_model, last_hidden_node_name)

    # Clone the desired layers with fixed weights
    cloned_layers = combine([last_node.owner]).clone(
        CloneMethod.freeze if freeze else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Add new dense layer for class prediction
    # (new_output_node_name is a module-level constant, e.g. "prediction")
    feat_norm  = input_features - Constant(114)
    cloned_out = cloned_layers(feat_norm)
    z          = Dense(num_classes, activation=None, name=new_output_node_name)(cloned_out)

    return z
Example #11
def create_model(base_model_file, input_features, params):
    num_classes = params['num_classes']
    dropout_rate = params['dropout_rate']
    freeze_weights = params['freeze_weights']

    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    log = logging.getLogger("neuralnets1.utils.create_model")
    log.info('Loaded base model - %s with layers:' % base_model_file)
    node_outputs = get_node_outputs(base_model)
    for layer in node_outputs:
        log.info('%s , %s' % (layer.name, layer.shape))
    graph.plot(base_model,
               filename="base_model.pdf")  # Write graph visualization

    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")

    # Clone the model until right before the pooling layer, i.e. up to and including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Add pool layer
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)  # assign a name to the layer
    # Add dropout layer with the specified dropout rate
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="Dense")(avgPoolDrop)
    return finalModel
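
A sketch of calling this variant with its params dictionary; the model file, input shape, and parameter values are all illustrative:

import cntk as C

params = {'num_classes': 10, 'dropout_rate': 0.5, 'freeze_weights': True}
input_features = C.input_variable((3, 224, 224), name='features')
model = create_model('ResNet18_ImageNet_CNTK.model', input_features, params)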
Example #12
def runCntkModelAllImages(model, classes, imgDir, mbs, node_name, mb_size=1):
    log = logging.getLogger("neuralnets.utils.runCntkModelAllImages")
    # Create empty dnn output dictionary
    #dnnOutput = dict()
    #for label in classes:
    #	dnnOutput[label] = dict()

    imgPaths = [line.strip('\n') for line in open(imgDir)]  # Prepare CNTK input
    # Run CNTK model for each image
    num_classes = model.shape[0]
    image_dimensions = find_by_name(model, "input").shape[::-1]

    # Set output node
    if not node_name:
        output_node = model  # use final prediction layer
    else:
        node_in_graph = model.find_by_name(node_name)  # e.g. poolingLayer output (512x1x1)
        output_node = combine([node_in_graph.owner])  # Set output node

    # Evaluate DNN for all images
    feats = []
    labels = []
    sample_counts = 0
    while sample_counts < len(imgPaths):
        sample_count = min(mb_size, len(imgPaths) - sample_counts)
        mb = mbs.next_minibatch(sample_count)
        output = output_node.eval(mb[mbs['features']])
        prev_count = sample_counts
        sample_counts += sample_count
        for path, o in zip(imgPaths[prev_count:sample_counts], output):
            feat = o.flatten()
            feat /= np.linalg.norm(feat, 2)  # L2-normalize the features for this image
            (imgFilename, imgSubdir) = os.path.basename(path).split()
            #dnnOutput[int(imgSubdir)][imgFilename] = feat
            feats.append(np.float32(feat))
            labels.append(int(imgSubdir))

        if sample_counts % 100 < mb_size:
            log.info(
                "Evaluating DNN (output dimension = %s) for image %s of %s: %s"
                % (len(feats[-1]), sample_counts, len(imgPaths), imgFilename))
    #repeat for train and test
    return feats, labels