Example #1
import cv2
import numpy as np
import caffe
from caffe.proto import caffe_pb2


#The snippet begins inside an image-transform helper; the signature below is
#inferred from the self.transform_img(...) call in Example #2
def transform_img(img, img_width, img_height):
    #Histogram equalization on each BGR channel
    img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
    img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
    img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])

    #Image Resizing
    img = cv2.resize(img, (img_width, img_height),
                     interpolation=cv2.INTER_CUBIC)

    return img


'''
Reading mean image, caffe model and its weights 
'''
#Read mean image
mean_blob = caffe_pb2.BlobProto()
#with open('/home/ubuntu/deeplearning-cats-dogs-tutorial/input/mean.binaryproto') as f:
#with open('/home/s2c/pkg/local/caffe-master_cuDNN/data/ilsvrc12/imagenet_mean.binaryproto.bk', 'rb') as f:
with open('./imagenet_mean.binaryproto', 'rb') as f:
    mean_blob.ParseFromString(f.read())
mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape(
    (mean_blob.channels, mean_blob.height, mean_blob.width))

print('mean_array type = ', type(mean_array))
print('mean array shape = ', mean_array.shape)
print('mean_blob channels = ', mean_blob.channels)
print('mean_blob height = ', mean_blob.height)
print('mean_blob width = ', mean_blob.width)

#Read model architecture and trained model's weights
net = caffe.Net('./deploy.prototxt', './bvlc_alexnet.caffemodel', caffe.TEST)
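
A minimal usage sketch of the mean image and net loaded above. The per-channel mean reduction, the 227x227 AlexNet input size, and the test image name are assumptions, not part of the original snippet:

#Hypothetical wiring of mean_array and net into one prediction
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', mean_array.mean(1).mean(1))  #per-channel BGR mean
transformer.set_transpose('data', (2, 0, 1))

img = cv2.imread('test.jpg', cv2.IMREAD_COLOR)           #assumed input image
img = transform_img(img, img_width=227, img_height=227)  #AlexNet input size

net.blobs['data'].data[...] = transformer.preprocess('data', img)
out = net.forward()
print('predicted class index = ', out['prob'][0].argmax())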
Example #2
    def Predict(self):

        colours = {
            1: 'black',
            2: 'blue',
            3: 'brown',
            4: 'darkblue',
            5: 'gold',
            6: 'green',
            7: 'maroon',
            8: 'orange',
            9: 'red',
            10: 'silver',
            11: 'white',
            12: 'yellow'
        }
        #Index -> label map, matching the 1-based lookups below
        types = {1: 'suv', 2: 'hatchback', 3: 'sedan', 4: 'carrier', 5: 'van'}
        mean_blob = caffe_pb2.BlobProto()
        #'rb' so ParseFromString receives bytes
        with open('input/mean.binaryproto', 'rb') as f:
            mean_blob.ParseFromString(f.read())
        mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape(
            (mean_blob.channels, mean_blob.height, mean_blob.width))
        net = caffe.Net('deploy.prototxt', 'caffe_model_iter_100.caffemodel',
                        caffe.TEST)
        net2 = caffe.Net('deploy2.prototxt', 'caffe_type_iter_100.caffemodel',
                         caffe.TEST)

        transformer = caffe.io.Transformer(
            {'data': net.blobs['data'].data.shape})
        #transformer.set_mean('data', mean_array)
        transformer.set_transpose('data', (2, 0, 1))

        test_img_paths = [str(self.filename)]

        test_ids = []
        preds = []
        for img_path in test_img_paths:
            img = cv2.imread(img_path, cv2.IMREAD_COLOR)
            img = self.transform_img(img,
                                     img_width=IMAGE_WIDTH,
                                     img_height=IMAGE_HEIGHT)

            net.blobs['data'].data[...] = transformer.preprocess('data', img)
            net2.blobs['data'].data[...] = transformer.preprocess('data', img)
            #Input blobs were filled above, so forward() needs no arguments
            out = net.forward()
            out2 = net2.forward()
            pred_probas = out['prob']
            pred_probas2 = out2['prob']
            highest = pred_probas[0][0]
            highest2 = pred_probas2[0][0]
            index = 0
            index2 = 0
            test_ids = test_ids + [img_path]

            result_string = ""
            for x in range(len(colours)):
                if pred_probas[0][x] > highest:
                    highest = pred_probas[0][x]
                    index = x
            for x in range(len(types)):
                if pred_probas2[0][x] > highest2:
                    highest2 = pred_probas2[0][x]
                    index2 = x
            print(img_path)
            print(colours[index + 1])
            print(types[index2 + 1])
            print('-------')
            result_string = colours[index + 1] + " " + types[index2 + 1]
            preds.append(result_string)

        with open("results.csv", "w") as f:

            f.write("id,label\n")
            for i in range(len(test_ids)):

                f.write(str(test_ids[i]) + "," + str(preds[i]) + "\n")
        self.ui.result.setText(result_string)
        f.close()
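
As a side note, the two manual arg-max loops above can be collapsed with numpy; a sketch over the same pred_probas arrays:

index = int(np.argmax(pred_probas[0]))    #same result as the manual loop
index2 = int(np.argmax(pred_probas2[0]))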
Example #3
    def TranslateModel(
        cls,
        caffe_net,
        pretrained_net,
        is_test=False,
        input_mean=None,
        net_state=None,
    ):
        net_state = caffe_pb2.NetState() if net_state is None else net_state
        net = caffe2_pb2.NetDef()
        net.name = caffe_net.name
        net_params = caffe2_pb2.TensorProtos()
        if len(caffe_net.layer) == 0:
            raise ValueError(
                'I think something is wrong. This translation script '
                'only accepts new style layers that are stored in the '
                'layer field.')
        if input_mean:
            caffenet_mean = caffe_pb2.BlobProto()
            caffenet_mean.ParseFromString(open(input_mean, 'rb').read())
            mean_ = utils.CaffeBlobToNumpyArray(caffenet_mean)
            mean_tensor = utils.NumpyArrayToCaffe2Tensor(mean_, 'mean_')
            net_params.protos.extend([mean_tensor])
            mean_op = caffe2_pb2.OperatorDef()
            mean_op.type = 'Sub'
            mean_op.input.extend(['data_', 'mean_'])
            # Assume that input blob's name is "data"
            mean_op.output.extend(['data'])
            net.op.extend([mean_op])
        i = 0
        while i < len(caffe_net.layer):
            if not _ShouldInclude(net_state, caffe_net.layer[i]):
                log.info('Current net state does not need layer {}'.format(
                    caffe_net.layer[i].name))
                #Advance the index before skipping to avoid an infinite loop
                i += 1
                continue
            log.info('Translate layer {}'.format(caffe_net.layer[i].name))
            # Get pretrained one
            pretrained_layers_index = ([
                l for l in xrange(len(pretrained_net.layer))
                if pretrained_net.layer[l].name == caffe_net.layer[i].name
            ] + [
                l for l in xrange(len(pretrained_net.layers))
                if pretrained_net.layers[l].name == caffe_net.layer[i].name
            ])
            is_bn = False
            if len(pretrained_layers_index) > 1:
                raise ValueError(
                    'huh? more than one pretrained layer of one name?')
            elif len(pretrained_layers_index) == 1:
                if pretrained_net.layer[
                        pretrained_layers_index[0]].type == "BatchNorm":
                    # A Scale layer should follow BatchNorm layer
                    # according to paper https://arxiv.org/abs/1502.03167.
                    assert pretrained_net.layer[pretrained_layers_index[0] +
                                                1].type == "Scale"
                    pretrained_blobs = [
                        utils.CaffeBlobToNumpyArray(blob) for blob in
                        pretrained_net.layer[pretrained_layers_index[0]].blobs
                    ] + [
                        utils.CaffeBlobToNumpyArray(blob) for blob in
                        pretrained_net.layer[pretrained_layers_index[0] +
                                             1].blobs
                    ]
                    is_bn = True
                else:
                    pretrained_blobs = [
                        utils.CaffeBlobToNumpyArray(blob) for blob in
                        pretrained_net.layer[pretrained_layers_index[0]].blobs
                    ]
            else:
                # No pretrained layer for the given layer name. We'll just pass
                # no parameter blobs.
                # print 'No pretrained layer for layer', layer.name
                pretrained_blobs = []

            operators, params = cls.TranslateLayer(caffe_net.layer[i],
                                                   pretrained_blobs, is_test)
            net.op.extend(operators)
            net_params.protos.extend(params)
            if is_bn:
                i += 2
            else:
                i += 1
        return net, net_params
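
A rough sketch of how a translator method like this is driven. The class name CaffeTranslator is a hypothetical stand-in for the enclosing class, the file names are placeholders, and google.protobuf.text_format is the usual way to parse a text prototxt:

from caffe.proto import caffe_pb2
from google.protobuf import text_format

caffe_spec = caffe_pb2.NetParameter()
text_format.Merge(open('deploy.prototxt').read(), caffe_spec)  #text prototxt

pretrained = caffe_pb2.NetParameter()
with open('net.caffemodel', 'rb') as f:  #binary weights
    pretrained.ParseFromString(f.read())

#CaffeTranslator stands in for whatever class defines TranslateModel
net_def, net_params = CaffeTranslator.TranslateModel(
    caffe_spec, pretrained, is_test=True)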
Example #4
    def load_weights(self, caffemodel):
        if self.has_mean:
            print('mean_file', self.mean_file)
            mean_blob = caffe_pb2.BlobProto()
            mean_blob.ParseFromString(open(self.mean_file, 'rb').read())

            if 'input_shape' in self.net_info['props']:
                channels = int(self.net_info['props']['input_shape']['dim'][1])
                height = int(self.net_info['props']['input_shape']['dim'][2])
                width = int(self.net_info['props']['input_shape']['dim'][3])
            else:
                channels = int(self.net_info['props']['input_dim'][1])
                height = int(self.net_info['props']['input_dim'][2])
                width = int(self.net_info['props']['input_dim'][3])

            mu = np.array(mean_blob.data)
            mu.resize(channels, height, width)
            mu = mu.mean(1).mean(1)
            mean_img = torch.from_numpy(mu).view(channels, 1, 1).expand(
                channels, height, width).float()

            self.register_buffer('mean_img',
                                 torch.zeros(channels, height, width))
            self.mean_img.copy_(mean_img)

        model = parse_caffemodel(caffemodel)
        layers = model.layer
        if len(layers) == 0:
            print('Using V1LayerParameter')
            layers = model.layers

        lmap = {}
        for l in layers:
            lmap[l.name] = l

        layers = self.net_info['layers']
        layer_num = len(layers)
        i = 0
        while i < layer_num:
            layer = layers[i]
            lname = layer['name']
            if 'include' in layer and 'phase' in layer['include']:
                phase = layer['include']['phase']
                lname = lname + '.' + phase
            ltype = layer['type']
            # if not lmap.has_key(lname):
            if lname not in lmap:
                i = i + 1
                continue
            if ltype in ['Convolution', 'Deconvolution']:
                print('load weights %s' % lname)
                convolution_param = layer['convolution_param']
                bias = True
                # if convolution_param.has_key('bias_term') and convolution_param['bias_term'] == 'false':
                if 'bias_term' in convolution_param and convolution_param[
                        'bias_term'] == 'false':
                    bias = False
                #weight_blob = lmap[lname].blobs[0]
                #print('caffe weight shape', weight_blob.num, weight_blob.channels, weight_blob.height, weight_blob.width)
                caffe_weight = np.array(lmap[lname].blobs[0].data)
                caffe_weight = torch.from_numpy(caffe_weight).view_as(
                    self.models[lname].weight)
                self.models[lname].weight.data.copy_(caffe_weight)
                if bias and len(lmap[lname].blobs) > 1:
                    self.models[lname].bias.data.copy_(
                        torch.from_numpy(np.array(lmap[lname].blobs[1].data)))
                    #print("convlution %s has bias" % lname)
                i = i + 1
            elif ltype == 'BatchNorm':
                print('load weights %s' % lname)
                self.models[lname].running_mean.copy_(
                    torch.from_numpy(
                        np.array(lmap[lname].blobs[0].data) /
                        lmap[lname].blobs[2].data[0]))
                self.models[lname].running_var.copy_(
                    torch.from_numpy(
                        np.array(lmap[lname].blobs[1].data) /
                        lmap[lname].blobs[2].data[0]))
                i = i + 1
            elif ltype == 'Scale':
                print('load weights %s' % lname)
                self.models[lname].weight.data.copy_(
                    torch.from_numpy(np.array(lmap[lname].blobs[0].data)))
                self.models[lname].bias.data.copy_(
                    torch.from_numpy(np.array(lmap[lname].blobs[1].data)))
                i = i + 1
            elif ltype == 'Normalize':
                print('load weights %s' % lname)
                self.models[lname].weight.data.copy_(
                    torch.from_numpy(np.array(lmap[lname].blobs[0].data)))
                i = i + 1
            elif ltype == 'InnerProduct':
                print('load weights %s' % lname)
                if type(self.models[lname]) == nn.Sequential:
                    self.models[lname][1].weight.data.copy_(
                        torch.from_numpy(np.array(
                            lmap[lname].blobs[0].data)).view_as(
                                self.models[lname][1].weight))
                    if len(lmap[lname].blobs) > 1:
                        self.models[lname][1].bias.data.copy_(
                            torch.from_numpy(
                                np.array(lmap[lname].blobs[1].data)))
                else:
                    self.models[lname].weight.data.copy_(
                        torch.from_numpy(np.array(lmap[lname].blobs[0].data)))
                    if len(lmap[lname].blobs) > 1:
                        self.models[lname].bias.data.copy_(
                            torch.from_numpy(
                                np.array(lmap[lname].blobs[1].data)))
                i = i + 1
            else:
                if ltype not in SUPPORTED_LAYERS:
                    print('load_weights: unknown type %s' % ltype)
                i = i + 1
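
parse_caffemodel is referenced above but not shown; a minimal sketch of what such a helper typically does, assuming the file is a protobuf-serialized NetParameter:

def parse_caffemodel(caffemodel):
    #Deserialize a .caffemodel file into a NetParameter message
    model = caffe_pb2.NetParameter()
    with open(caffemodel, 'rb') as f:
        model.ParseFromString(f.read())
    return model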
Example #5

def as_blob(array):
    blob = pb2.BlobProto()
    blob.shape.dim.extend(array.shape)
    blob.data.extend(array.astype(float).flat)
    return blob
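
A quick round-trip check for as_blob, assuming pb2 is caffe.proto.caffe_pb2 imported under that alias:

import numpy as np

weights = np.random.randn(64, 3, 7, 7)
blob = as_blob(weights)
print(list(blob.shape.dim))  #[64, 3, 7, 7]
print(len(blob.data))        #9408 flattened float values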
Example #6
    def mean_blob_fn(self):
        mean_blob = caffe_pb2.BlobProto()
        with open('../mean.binaryproto', 'rb') as f:
            mean_blob.ParseFromString(f.read())

        return mean_blob
Example #7

def binartproto2npy(mean_file):
    blob = caffe_pb2.BlobProto()
    blob.ParseFromString(open(mean_file, 'rb').read())
    arr = np.array(caffe.io.blobproto_to_array(blob))
    out = arr[0]
    return out
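
caffe.io.blobproto_to_array performs the same channels/height/width reshape that Examples #1 and #2 do by hand; a usage sketch with an assumed ImageNet mean file:

mean = binartproto2npy('imagenet_mean.binaryproto')  #assumed file name
print(mean.shape)  #typically (3, 256, 256) for the ImageNet mean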
Example #8
def load_mean_binary(mean_file):
    blob = caffe_pb2.BlobProto()
    bin_mean = open(mean_file, 'rb').read()  #proto file
    blob.ParseFromString(bin_mean)
    arr_mean = caffe.io.blobproto_to_array(blob)
    return arr_mean[0]
Example #9

    n = 49
    dxdy.append([-abs(a1[0] - a2[0]) / n, abs(a1[1] - a2[1]) / n])

dxdy_2 = []
for i in range(1, len(pointers_lat_2) - 1):
    direct = 2
    p1 = label2coord(i - 1)
    p2 = label2coord(i)
    p3 = label2coord(i + 1)
    a1 = [abs(p1[0] + p2[0]) / 2, abs(p1[1] + p2[1]) / 2]
    a2 = [abs(p2[0] + p3[0]) / 2, abs(p2[1] + p3[1]) / 2]
    n = 49
    dxdy_2.append([abs(a1[0] - a2[0]) / n, -abs(a1[1] - a2[1]) / n])

#Read mean images, caffe model architectures and their trained weights
mean_blob = caffe_pb2.BlobProto()
with open('./input/SFMV/mean.binaryproto', 'rb') as f:
    mean_blob.ParseFromString(f.read())
mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape(
    (mean_blob.channels, mean_blob.height, mean_blob.width))
net = caffe.Net('./caffe/SFMV/deploy.prototxt',
                './caffe/SFMV/googlenet_train_iter_186000.caffemodel',
                caffe.TEST)

#Define image transformers
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', mean_array)
transformer.set_transpose('data', (2, 0, 1))

#Read mean images, caffe model architectures and their trained weights
mean_blob_way = caffe_pb2.BlobProto()
Example #10
def _load_binaryproto(file):
    blob = caffe_pb2.BlobProto()
    data = open(file, 'rb').read()
    blob.ParseFromString(data)
    arr = np.array(caffe.io.blobproto_to_array(blob))
    return arr[0]
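
A sketch of feeding the loaded mean into a Transformer like the one in Example #9; the net and transformer names here are assumptions, and the per-channel reduction avoids shape mismatches when the mean image and the net input differ in size:

mean_arr = _load_binaryproto('mean.binaryproto')  #(channels, height, width)
mu = mean_arr.mean(1).mean(1)                     #per-channel BGR mean
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', mu)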