def print_net_info(model_path, phase, net_params):
    # disable caffe logging
    net = Net(model_path, phase=phase)
    # Blobs = Data, net_params.layer = Layers <=> counts won't be equal
    nb_layers = len(net_params.layer)
    print "Net architecture : ", model_path
    print " - layer name (layer type) : (batch size, feature dim, width, height) | number of filters, (filter dim, filter width, filter height), stride, padding"
    counter = 0
    for k, v in net.blobs.items():
        params = []
        if k in net.params:
            params = net.params[k][0].data.shape
        padding = None
        stride = None
        layer_type = None
        # blobs and layers are not one-to-one, so guard the layer index
        if counter < nb_layers:
            layer = net_params.layer[counter]
            layer_type = layer.type
            if layer_type == 'Convolution' or layer_type == 'Deconvolution':
                stride = layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1
                padding = layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0
            elif layer_type == 'Pooling':
                stride = layer.pooling_param.stride
                padding = layer.pooling_param.pad
            # compare against None so that a stride or pad of 0 is still reported
            if padding is not None and stride is not None and layer_type is not None:
                print "%s (%s) : %s | %s, %s, stride=%s, padding=%s" % (k, layer_type, v.data.shape, params[0], params[1:], stride, padding)
            elif len(params):
                print "%s : %s | %s, %s" % (k, v.data.shape, params[0], params[1:])
            else:
                print "%s : %s" % (k, v.data.shape)
        else:
            print "%s : %s" % (k, v.data.shape)
        counter += 1
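For context, net_params is the parsed NetParameter message for the same prototxt. A minimal usage sketch, assuming a hypothetical 'deploy.prototxt' path that is not part of the snippet above:

# Parse the prototxt into a NetParameter and print the architecture summary.
from caffe import TEST
from caffe.proto import caffe_pb2
from google.protobuf import text_format

model_path = 'deploy.prototxt'  # hypothetical path
net_params = caffe_pb2.NetParameter()
with open(model_path) as f:
    text_format.Merge(f.read(), net_params)

print_net_info(model_path, TEST, net_params)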
def build(self, network_filename='inception.prototxt'):
    """main method."""
    network = self._build_network()
    print network
    with open(network_filename, 'w') as network_file:
        network_file.write(text_format.MessageToString(network))
    return Net(network_filename)
def build(self):
    """main method."""
    network = self._build_network()
    print network
    network_filename = 'inception.prototxt'
    with open(network_filename, 'w') as network_file:
        network_file.write(text_format.MessageToString(network))
    net = Net(network_filename)
    net.set_phase_test()
    net.set_mode_gpu()
    print net.forward()
def build_network(self, netname, batchsize=1, numstep=24):
    """main method."""
    if netname == 'rnn':
        network = self._build_rnn_network(batchsize=batchsize, numstep=numstep)
    else:
        print('unknown netname: %s' % netname)
        return
    network_filename = '%s_t%d_finetuning.prototxt' % (netname, numstep)
    print network
    with open(network_filename, 'w') as network_file:
        network_file.write(text_format.MessageToString(network))
    return Net(network_filename)
def classify(
    image,
    model=_static_file("deploy.prototxt"),
    weights=_static_file("resnet_50_1by2_nsfw.caffemodel"),
):
    """
    Determine the probability that an image is SFW or NSFW.

    Parameters
    ----------
    image : a PIL ImageFile object

    Keyword Arguments
    -----------------
    These arguments default to Yahoo's open_nsfw defaults. If you have your
    own trained models, you may pass the paths to the `.prototxt` and
    `.caffemodel` files.

    model : a string path to a Caffe model file (e.g. deploy.prototxt)
    weights : a string path to a Caffe weights file (e.g. caffenet.caffemodel)

    Returns
    -------
    (sfw, nsfw) : a tuple with SFW and NSFW probabilities
    """
    net = Net(model, 1, weights=weights)
    transformer = Transformer({"data": net.blobs["data"].data.shape})
    # Move image channels to outermost
    transformer.set_transpose("data", (2, 0, 1))
    # Subtract the dataset-mean value in each channel
    transformer.set_mean("data", numpy.array([104, 117, 123]))
    # Rescale from [0, 1] to [0, 255]
    transformer.set_raw_scale("data", 255)
    # Swap channels from RGB to BGR
    transformer.set_channel_swap("data", (2, 1, 0))
    sfw, nsfw = _process(_resize(image), net=net, transformer=transformer)
    return (sfw, nsfw)
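A short usage sketch for classify (the image path below is a placeholder, not from the original code):

# Score a single image with the default open_nsfw model.
from PIL import Image

image = Image.open('example.jpg')  # placeholder path
sfw, nsfw = classify(image)
print("SFW: %.3f, NSFW: %.3f" % (sfw, nsfw))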
    def forward(self, bottom, top):
        X = np.rollaxis(bottom[0].data, 1, 4)
        batch_size, h, w, _ = X.shape
        num_p = h * w
        BY = bottom[1].data
        BY[:, 1:5] /= self.scale
        obj_score, Y, gt_class = process(X, BY)
        top[0].data[...] = obj_score
        top[1].data[...] = Y
        top[2].data[...] = gt_class

    def backward(self, top, propagate_down, bottom):
        pass


if __name__ == '__main__':
    from caffe import Net, TRAIN
    import cv2
    net = Net('od2.pt', 'od_train_iter_62692.caffemodel', TRAIN)
    for count in range(100):
        net.forward()
        Bx, score = reconstruct(net.blobs['bbox'].data,
                                net.blobs['obj_score'].data,
                                net.blobs['label'].data)
        img = visualize('data/pvtdb/VOC2007/JPEGImages/%06d.jpg' % count,
                        Bx, net.blobs['data'].data, 32)
        cv2.imshow('%06d.jpg' % count, img)
        key = cv2.waitKey(0)
        cv2.destroyAllWindows()
        if key == 27:
            break
    By = gt_boxes[gt_boxes[:, 0] == n, 1:5]
    IOU = iou(Bx, By)
    print IOU.max(axis=0)
    return (Bx, score)


def visualize(filename, Bx, data, scale):
    img = data[0].copy()
    # add back the per-channel BGR mean before displaying
    img[0] += 103
    img[1] += 116
    img[2] += 123
    img = np.array(np.rollaxis(img, 0, 3), dtype=np.uint8).copy()
    for bx in Bx * scale:
        cv2.rectangle(img, (bx[0], bx[1]), (bx[2], bx[3]), (0, 0, 255), 2)
    return img


if __name__ == '__main__':
    from caffe import Net, TRAIN
    net = Net('caffe-materials/practice6/net-test.pt',
              'caffe-materials/practice6/net_trained.cm', TRAIN)
    for count in range(100):
        net.forward()
        Bx, score = reconstruct(net.blobs['bbox'].data,
                                net.blobs['obj_score'].data,
                                net.blobs['label'].data)
        img = visualize('data/pvtdb/VOC2007/JPEGImages/%06d.jpg' % count,
                        Bx, net.blobs['data'].data, 32)
        cv2.imshow('%06d.jpg' % count, img)
        key = cv2.waitKey(0)
        cv2.destroyAllWindows()
        if key == 27:
            break
#! python
# coding=utf-8
import os, sys

caffe_root = '/data/darwin-caffe-base/'  # Caffe root directory
os.chdir(caffe_root)  # os.chdir() changes the current working directory to the given path
sys.path.insert(0, caffe_root + 'python')

from caffe import Net, TEST
import caffe
import numpy as np

net = Net('/data/mgn_caffe/resnet50/ResNet-50-deploy.prototxt',
          '/data/mgn_caffe/resnet50/ResNet-50-model.caffemodel', caffe.TEST)
netNew = Net('/data/mgn_caffe/resnet50/ResNet-50-deploy-3.prototxt', caffe.TEST)

for k1, v1 in netNew.params.items():
    for k, v in net.params.items():
        # print (k, v[0].data.shape)
        # print np.size(net.params[k])
        for i in range(np.size(net.params[k])):
            if (k1 == k + '_3'):
                print(k1)
                netNew.params[k1][i].data[:] = np.copy(net.params[k][i].data[:])
        for i in range(np.size(net.params[k])):
            if (k1 == k):
                print(k1)
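The loop above only matches and copies parameters; if the goal is net surgery, the result would typically be written out afterwards. A one-line sketch, with a placeholder output filename:

# Persist the transplanted weights (output path is a placeholder).
netNew.save('/data/mgn_caffe/resnet50/ResNet-50-model-3.caffemodel')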
imageToTest = cv.resize(oriImg, (0, 0), fx=scale, fy=scale,
                        interpolation=cv.INTER_CUBIC)
imageToTest_padded, pad = util.padRightDownCorner(
    imageToTest, model['stride'], model['padValue'])
print(imageToTest_padded.shape)

net.blobs['data'].reshape(*(1, 3, imageToTest_padded.shape[0],
                            imageToTest_padded.shape[1]))
# net.forward()  # dry run
net.blobs['data'].data[...] = np.transpose(
    np.float32(imageToTest_padded[:, :, :, np.newaxis]),
    (3, 2, 0, 1)) / 256 - 0.5

start_time = time.time()
output_blobs = net.forward()
print('At scale %d, the CNN took %.2f ms.' % (m, 1000 * (time.time() - start_time)))

# extract outputs, resize, and remove padding
heatmap = np.transpose(
    np.squeeze(net.blobs[list(output_blobs.keys())[1]].data),
    (1, 2, 0))  # output 1 is heatmaps
heatmap = cv.resize(heatmap, (0, 0), fx=model['stride'], fy=model['stride'],
                    interpolation=cv.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2],
                  :imageToTest_padded.shape[1] - pad[3], :]
heatmap = cv.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                    interpolation=cv.INTER_CUBIC)
#! python
# coding=utf-8
import sys

caffe_root = '/path/to/caffe/'  # fill in your Caffe root
sys.path.insert(0, caffe_root + 'python')

from caffe import Net

n_orig = Net('orig_model.prototxt', 'orig_model.caffemodel', 0)
n_new = Net('new_model.prototxt', 'orig_model.caffemodel', 0)

for orig_name in n_orig.params:
    # the new prototxt uses renamed layers, e.g. orig_name + '_p'
    new_name = orig_name + '_p'
    n_new.params[new_name] = n_orig.params[orig_name]

n_new.save('new_model.caffemodel')
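As a quick sanity check, a copied blob can be compared in both nets. A sketch assuming an example layer named 'conv1' in the original prototxt and 'conv1_p' in the renamed one (substitute names from your own model):

import numpy as np

# 'conv1' / 'conv1_p' are example layer names, not taken from the snippet above.
print(np.array_equal(n_orig.params['conv1'][0].data,
                     n_new.params['conv1_p'][0].data))  # expect True after the copy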