Example #1
def collect_activation(selected_layer, selected_block):
    model_def = '/ssd/yqian/prune/model/reid/deploy_baseline.prototxt'
    model_weights = '/ssd/yqian/prune/model/body_reid_general_npair_caffe_cpu_ctf_20190925_v010002/npair_may_to_aug_ctf_all_stores_finetune_full_year_iter_44000.caffemodel'

    # load net
    caffe.set_device(0)
    caffe.set_mode_gpu()
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    # load the mean ImageNet image (as distributed with Caffe) for subtraction
    mean_value = np.array([104, 117, 123], dtype=float)

    sample_num = 2000
    act_mean = {}
    layers = [
        '2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e',
        '4f', '5a', '5b', '5c'
    ]
    data_list = np.loadtxt(
        '/ssd/yqian/prune/dataset/data/test_data/eval_CTF_beijing_xhm_20181207_label_finish_revision.txt',
        dtype=str)
    img_index = random.sample(range(len(data_list)), sample_num)
    # f = open('/ssd/yqian/prune/dataset/data/train_all_new.txt')
    for file_index in img_index:
        # offset = random.randrange(2e7)
        # f.seek(offset, 0)
        # line = f.readline()
        # time_start = time.time()
        # while len(line) < 2:
        #     offset = random.randrange(2e7)
        #     f.seek(offset, 0)
        #     line = f.readline()
        # try:
        #     file_path = '/ssd/yqian/prune/dataset/data/' + line.split()[0]
        # except IndexError:
        #     print('error: ', len(line))
        # im = cv2.imread(file_path)
        # while im is None:
        #     offset = random.randrange(2e7)
        #     f.seek(offset, 0)
        #     line = f.readline()
        #     while len(line) < 2:
        #         offset = random.randrange(2e7)
        #         f.seek(offset, 0)
        #         line = f.readline()
        #     try:
        #         file_path = '/ssd/yqian/prune/dataset/data/' + line.split()[0]
        #     except IndexError:
        #         print('error: ', len(line))
        #     im = cv2.imread(file_path)
        # print(line.split()[0])
        file_path = '/ssd/yqian/prune/dataset/data/test_data/all/' + data_list[
            file_index][0]
        im = cv2.imread(file_path)
        im = resize_image_with_padding(im, (384, 128))
        im = im.astype(np.float32) - mean_value  # cast to float before mean subtraction
        im = np.transpose(im, (2, 0, 1))  # HWC -> CHW
        im = np.reshape(im, (1, 3, 384, 128))  #CHW ->NCHW

        # shape for input (data blob is N x C x H x W), set data
        # center crop
        # im = im[:, 16:240, 16:240]
        net.blobs['data'].reshape(*im.shape)
        net.blobs['data'].data[...] = im
        # run net and take argmax for prediction
        net.forward()

        for i in range(len(selected_layer)):
            for j in range(len(selected_block)):
                if selected_block[j] == 1:
                    output_layer = 'res' + layers[
                        selected_layer[i]] + '_branch2a'
                else:
                    output_layer = 'res' + layers[
                        selected_layer[i]] + '_branch2b'
                activation = net.blobs[output_layer].data
                if output_layer not in act_mean:
                    act_mean[output_layer] = [
                        np.mean(activation, axis=(0, 2, 3)).tolist()
                    ]
                else:
                    act_mean[output_layer].append(
                        np.mean(activation, axis=(0, 2, 3)).tolist())
    for key in act_mean:
        layer_act = act_mean[key]
        act_mean[key] = np.sum(np.abs(np.array(layer_act)), axis=0).tolist()
        act_mean[key] = float(cal_corrcoef(act_mean[key]))
    print(act_mean)
    with open('act_mean.json', 'w') as f:
        json.dump(act_mean, f)
    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]

    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)

    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)

    test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
Example #3
if not os.path.exists(out_json_dir):
    os.makedirs(out_json_dir)

# Load lstm data
if twitter:
    lstm_scores = {}
    for line in open(lstm_scores_path, 'r'):
        lstm_scores[line.split(',')[0]] = float(line.split(',')[1])

img_paths = []
for file in os.listdir(json_dir):
    img_paths.append(img_dir + file.replace('.json', '.jpg'))

# load net
net = caffe.Net('deploy.prototxt',
                '../../../datasets/COCO-Text/fcn8s-atonce.caffemodel',
                caffe.TEST)

print 'Filtering ...'

count = 0
start = time.time()
discardedByLSTM = 0
for img_path in img_paths:

    # LSTM discarding
    if twitter and random.randint(0, 100) > lstm_filtering_probability:
        try:
            if lstm_scores[img_path.split('/')[-1].split('.')[0]] < lstm_th:
                print("Img discarded by LSTM")
                discardedByLSTM += 1
        result_dict['Top-1 Class'] = label_list[int(
            index_list[-1])].split(' ')[1]

        result_dict['Confidence'] = [str(i) for i in list(output_prob)]
        lst_result.append(result_dict)
    return lst_result


if __name__ == '__main__':
    args = parser()
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    net_cls = caffe.Net(
        "lib/models/deploy.prototxt",
        "lib/models/se-res50-hiv-v0.3-t1_iter_78000.caffemodel", caffe.TEST)

    cls_list = np.loadtxt('lib/labels.lst', str, delimiter='\n')
    test_list = np.loadtxt(args.img_file, str, delimiter='\n')

    dict_result = {}

    for i in range(len(test_list)):
        starttime = time.time()
        dict_result_tmp = single_img_process(net_cls, args.data_root,
                                             test_list[i], cls_list)
        endtime = time.time()
        print 'speed: {:.3f}s / iter'.format(endtime - starttime)
        for item in dict_result_tmp:
            dict_result[os.path.basename(item['File Name'])] = item
    #caffe.set_mode_cpu()
    # Use GPU for inference
    caffe.set_mode_gpu()

    # load nifti file
    imagedata = nib.load(options.imagefile)
    numpyimage = imagedata.get_data()

    # create empty segmentation
    segmentation = np.zeros(imagedata.shape, dtype=np.uint8)
    pred_step2 = np.zeros((388, 388, segmentation.shape[2]),
                          dtype=np.uint8)  # hard-coded U-Net output dimensions

    # ### Load network prototxt and weights and perform prediction ###
    # #### Step 1 ####
    net1 = caffe.Net(STEP1_DEPLOY_PROTOTXT, STEP1_MODEL_WEIGHTS, caffe.TEST)
    print "%d slices: " % segmentation.shape[2]
    # loop through all slices
    for iii in range(segmentation.shape[2]):
        #for iii in [30]:
        print "%d  " % iii,
        # NN expect liver in top left
        imageslice = numpyimage[..., iii]
        img_p = step1_preprocess_img_slice(imageslice.transpose())
        #imshow(img_p,title=['Test image'])

        # Predict
        net1.blobs['data'].data[0, 0, ...] = img_p
        predprob = net1.forward()['prob'][0, 1]
        pred = predprob > 0.5
        #print pred.shape
#!/usr/bin/env python

import caffe
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description="Export caffe weights to h5 format.")
    parser.add_argument("prototxt", help="Path to prototxt file.")
    parser.add_argument("caffemodel", help="Path to weights file.")
    parser.add_argument("output", help="Path to output weights.")

    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    net = caffe.Net(args.prototxt, caffe.TEST, weights=args.caffemodel)
    net.save_hdf5(args.output)

    print("done.")
    #model_def = '/opt/zhangjing/TextBoxes/jobs2/deploy.prototxt'
    #model_weights = '/opt/zhangjing/TextBoxes/jobs2/VGG_text_longer_conv_300x300_iter_190000.caffemodel'

    #scales=((300,300),)
    #scales=((1600,1600),)

    # IMPORTANT: if you use multiple scales as in the paper, you need an extra
    # non-maximum suppression (NMS) pass over the combined results (a minimal sketch follows below)
    scales=((300,300),(700,700),(700,500),(700,300),(1600,1600)) # multi-scale testing
    #scales=((300,300),)
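    # --- Hedged illustration, not part of the original snippet -----------------
    # The note above says multi-scale testing needs an extra non-maximum
    # suppression pass over the merged detections. A minimal axis-aligned NMS
    # sketch follows, assuming the merged detections form a numpy array whose
    # rows are [xmin, ymin, xmax, ymax, score].
    import numpy as np

    def nms_merged_detections(dets, iou_thresh=0.5):
        x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
        scores = dets[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]  # indices sorted by descending score
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # intersection of the current best box with the remaining boxes
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
            iou = inter / (areas[i] + areas[order[1:]] - inter)
            order = order[np.where(iou <= iou_thresh)[0] + 1]
        return dets[keep]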

    import caffe
    #caffe.set_device(0)
    caffe.set_mode_cpu()

    net = caffe.Net(model_def,      # defines the structure of the model
                    model_weights,  # contains the trained weights
                    caffe.TEST)     # use test mode (e.g., don't perform dropout)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    print(net.blobs['data'].data.shape)

    #test_list=open('/opt/yushan/TextBoxes/data/icdar13/test_list.txt')
    save_dir='/opt/zhangjing/TextBoxes/examples/TextBoxes/test_bb/'
    orig_image_dir = '/opt/zhangjing/TextBoxes/examples/TextBoxes/test_chinese'


    from os import path
    files = [x for x in os.listdir(orig_image_dir) if path.isfile(orig_image_dir+os.sep+x)]
    num = 0
    for line in files:
        num = num + 1
    return img


'''
Reading mean image, caffe model and its weights 
'''
#Read mean image
mean_blob = caffe_pb2.BlobProto()
with open('/home/ubuntu/deeplearning-cats-dogs-tutorial/input/mean.binaryproto',
          'rb') as f:
    mean_blob.ParseFromString(f.read())
mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape(
    (mean_blob.channels, mean_blob.height, mean_blob.width))

#Read model architecture and trained model's weights
net = caffe.Net('~/autobot/caffenet_deploy_2.prototxt',
                '~/autobot/caffe_model_2_iter_10000.caffemodel', caffe.TEST)

#Define image transformers
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', mean_array)
transformer.set_transpose('data', (2, 0, 1))
'''
Making predicitions
'''
##Reading image paths
test_img_paths = [img_path for img_path in glob.glob("../input/test1/*jpg")]

test_ids = []
preds = []

#Making predictions
    for i in xrange(7):
        _resnet_block('3b{}'.format(i + 1), n, bottom, 128)
        bottom = n['res3b{}_relu'.format(i + 1)]

    _resnet_block('4a', n, bottom, 256, branch1=True)
    bottom = n.res4a_relu
    for i in xrange(35):
        _resnet_block('4b{}'.format(i + 1), n, bottom, 256)
        bottom = n['res4b{}_relu'.format(i + 1)]

    _resnet_block('5a', n, bottom, 512, branch1=True)
    _resnet_block('5b', n, n.res5a_relu, 512)
    _resnet_block('5c', n, n.res5b_relu, 512)

    n.pool5 = L.Pooling(n.res5c_relu,
                        kernel_size=7,
                        stride=1,
                        pool=P.Pooling.AVE)


if __name__ == '__main__':
    n = caffe.NetSpec()
    n.data = L.DummyData(shape=[dict(dim=[1, 3, 224, 224])])
    resnet152(n, n.data)
    n.fc1000 = L.InnerProduct(n.pool5, num_output=1000)
    n.prob = L.Softmax(n.fc1000)
    with open('tmp.prototxt', 'w') as f:
        f.write(str(n.to_proto()))
    net = caffe.Net('tmp.prototxt',
                    'models/resnet152/ResNet-152-model.caffemodel', caffe.TEST)
    print(net.blobs)
import numpy as np
import sys
import caffe
import copy

net = caffe.Net('yolo_conv21.prototxt', caffe.TEST)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))
params = ['conv21_1', 'conv21_2', 'bn21_1', 'bn21_2', 'scale21_1', 'scale21_2']

conv_params = {pr: (net.params[pr][0].data) for pr in params}

for fc in params:
    print('{} weights are {} dimensional and biases are 0 dimensional'.format(
        fc, conv_params[fc].shape))

net_ref = caffe.Net('yolo_conv21_ref.prototxt', 'yolo_conv21_ref.caffemodel',
                    caffe.TEST)
print("for the reference model: blobs {}\nparams {}".format(
    net_ref.blobs.keys(), net_ref.params.keys()))
params_ref = ['conv21', 'bn21', 'scale21']
conv_params_ref = {pr: (net_ref.params[pr][0].data) for pr in params_ref}

for fc in params_ref:
    print(
        'for reference model: {} weights are {} dimensional and biases are 0 dimensional'
        .format(fc, conv_params_ref[fc].shape))

for param in net.params.keys():

    if param not in params:
        for i in range(len(net.params[param])):
Example #11
        fname_mask = "{}.{:04d}.mask.txt".format(im_name, instance_idx)
        fpath_mask = os.path.join(dir, fname_mask)
        # TODO(andrei): Consider compressing this; you may be able to save
        # a TON of space.
        np.savetxt(fpath_mask, instance['mask'].astype(np.bool_))

if __name__ == '__main__':
    args = parse_args()
    test_prototxt = args.prototxt
    test_model = args.caffemodel

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    cfg.GPU_ID = args.gpu_id
    net = caffe.Net(test_prototxt, test_model, caffe.TEST)

    # Warm up for the first two images
    im = 128 * np.ones((300, 500, 3), dtype=np.float32)
    for i in xrange(2):
        _, _, _ = im_detect(im, net)

    demo_dir = args.input
    demo_result_dir = args.output
    if not os.path.exists(demo_result_dir):
        os.mkdir(demo_result_dir)

    fig = plt.figure()
    do_resize = (args.inference_width != -1 and args.inference_height != -1)
    if do_resize:
        print("Will resize input images to {}x{} before feeding them into the "
Example #12
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(str(n.to_proto()))
        return f.name

dummy_data = L.DummyData(shape=dict(dim=[1, 3, 227, 227]))
imagenet_net_filename = caffenet(data=dummy_data, train=False)
imagenet_net = caffe.Net(imagenet_net_filename, weights, caffe.TEST)

def style_net(train=True, learn_all=False, subset=None):
    if subset is None:
        subset = 'train' if train else 'test'
    source = caffe_root + 'data/flickr_style/%s.txt' % subset
    transform_param = dict(mirror=train, crop_size=227,
        mean_file=caffe_root + 'data/ilsvrc12/imagenet_mean.binaryproto')
    style_data, style_label = L.ImageData(
        transform_param=transform_param, source=source,
        batch_size=50, new_height=256, new_width=256, ntop=2)
    return caffenet(data=style_data, label=style_label, train=train,
                    num_classes=NUM_STYLE_LABELS,
                    classifier_name='fc8_flickr',
                    learn_all=learn_all)
    for label in labels:
        found = False
        for i in xrange(0, num_labels):
            if label == labelmap.item[i].label:
                found = True
                labelnames.append(labelmap.item[i].display_name)
                break
        assert found == True
    return labelnames


loc_model_def = '/media/gsy/D02497132496FC22/Users/Administrator/Desktop/zfsb/location/deploy.prototxt'
loc_model_weights = '/media/gsy/D02497132496FC22/Users/Administrator/Desktop/zfsb/location/VGG_VOC0712_SSD_512x512_iter_116000.caffemodel'

net_loc = caffe.Net(
    loc_model_def,  # The structure of the model
    loc_model_weights,  # The trained weights
    caffe.TEST)  # Use test mode

# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer_loc = caffe.io.Transformer(
    {'data': net_loc.blobs['data'].data.shape})
transformer_loc.set_transpose('data', (2, 0, 1))
transformer_loc.set_mean('data', np.array([104, 117, 123]))  # mean pixel
transformer_loc.set_raw_scale(
    'data', 255
)  # the reference model operates on images in [0,255] range instead of [0,1]
transformer_loc.set_channel_swap(
    'data',
    (2, 1, 0))  # the reference model has channels in BGR order instead of RGB

labelmap_file_loc = '/media/gsy/D02497132496FC22/Users/Administrator/Desktop/zfsb/location/labelmap_voc.prototxt'
im_ht = 227
im_wd = 227
transformer = caffe.io.Transformer({'data': (1,3,im_ht,im_wd)})
transformer.set_transpose('data', (2,0,1))
transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB
transformer.set_mean('data', np.array([104.00699,116.66877,122.67892]))

netfile = 'models/deploy.prototxt'
modelfile = 'models/impart.caffemodel'
modelfile2 = 'models/autoenc.caffemodel'
inpath = 'data/chair.jpg'
outfpath = 'output/chair.h5'

net = caffe.Net(netfile, modelfile, caffe.TEST)
if len(modelfile2):
  net.copy_from(modelfile2)

im = plt.imread(inpath)
if np.shape(im)[2] > 3:
  im = im[:, :, :3]
if np.max(im) > 1:
  im = im / 255.0

in_ = transformer.preprocess('data', im)
out = predModel_3dnw(net, in_)

with h5py.File(outfpath, 'w') as f:
  f.create_dataset('reconst', data=out, compression='gzip', compression_opts=9)
transformAndVisVoxels.saveVisSnapshotMayavi(out, outfpath + '.jpg')
Example #15
def rrpn_test_ICDAR2015_modelfusion(models, mode):

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    NETS = {'rrpn': ('VGG16', )}

    # rrpn only
    prototxt = os.path.join(cfg.RRPN_MODELS_DIR, NETS['rrpn'][0],
                            'faster_rcnn_end2end', 'test.prototxt')

    results_matrix = []

    roidb = test_ICDAR2015(mode)

    for model in models:

        day = model["day"]
        exp = model["exp"]
        model_name = model["model_name"]

        caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                                  day + "/" + exp + "/" + model_name)

        print caffemodel

        if not os.path.isfile(caffemodel):
            raise IOError(('{:s} not found.\nDid you run ./data/script/'
                           'fetch_faster_rcnn_models.sh?').format(caffemodel))

        caffe.set_mode_gpu()
        caffe.set_device(0)
        cfg.GPU_ID = 0

        net = caffe.Net(prototxt, caffemodel, caffe.TEST)

        print '\n\nLoaded network {:s}'.format(caffemodel)

        # Warmup on a dummy image
        #im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
        #for i in xrange(2):
        # 	_, _= rotation_demo.r_im_detect(net, im)

        im_names = []  #['IMG_0030.JPG','IMG_0059.JPG','IMG_0063.JPG']

        cfg.TEST.RATIO_GROUP = model["ratio"]

        gt_boxes = []

        for rdb in roidb:
            im_names.append(rdb['image'])
            gt_boxes.append([0, 0, 0, 0, 0])

        if not os.path.isdir("./result/" + day + "/"):
            os.mkdir("./result/" + day + "/")
        if not os.path.isdir("./result/" + day + "/" + exp + "/"):
            os.mkdir("./result/" + day + "/" + exp + "/")
        if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                             "/"):
            os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/")
        if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                             "/" + mode + "/"):
            os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                     mode + "/")

        if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                             "/" + mode + "_origin/"):
            os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                     mode + "_origin/")

        result_queue = []

        for im_idx in range(len(im_names)):
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print 'Demo for data/demo/{}'.format(im_names[im_idx])
            print "model: ", model_name
            #print im_names[im_idx], gt_boxes[im_idx]

            result = rotation_demo.demo(
                net, im_names[im_idx], gt_boxes[im_idx], "./result/" + day +
                "/" + exp + "/" + model_name + "/" + mode + "/", "./result/" +
                day + "/" + exp + "/" + model_name + "/" + mode + "_origin/",
                model["conf"])

            im_file = im_names[im_idx]
            im = cv2.imread(im_file)
            im_height = im.shape[0]
            im_width = im.shape[1]
            result_queue.append({
                "result": result,
                "image": im_file,
                "height": im_height,
                "width": im_width
            })
            #print result_queue[0]

        results_matrix.append({
            "result_queue": result_queue,
            "day": day,
            "exp": exp,
            "model_name": model_name
        })

        #dets = det_fusion(result_queue)
        #rotation_demo.write_result_ICDAR(im_names[im_idx], dets, 0.9,"./result/" + day + "/" + exp + "/" + model_name + "/" + mode + "/", im_height, im_width)

    print "fusioning..."

    for i in range(len(roidb)):
        bboxes_arr = []

        im_name = ""
        day = ""
        exp = ""
        model_name = ""
        im_height = 0
        im_width = 0

        for results_queue in results_matrix:
            bboxes_arr.extend(results_queue["result_queue"][i]["result"])

            im_name = results_queue["result_queue"][i]["image"]
            day = results_queue["day"]
            exp = results_queue["exp"]
            model_name = results_queue["model_name"]
            im_height = results_queue["result_queue"][i]["height"]
            im_width = results_queue["result_queue"][i]["width"]

        dets = det_fusion(bboxes_arr)

        rotation_demo.write_result_ICDAR(
            im_name, dets, 0.7, "./result/" + day + "/" + exp + "/" +
            model_name + "/" + mode + "/", im_height, im_width)
    print "done"
def main():
    dir = '/home/chris/PycharmProjects/loss-visualization/models/quick_learn'
    DB_PATH = '/home/chris/caffe/examples/cifar10/cifar10_test_lmdb'
    MODEL_FILE = os.path.join(dir, 'solver.prototxt')
    PRETRAINED = os.path.join(dir, 'model.caffemodel')
    MEAN_FILE_PATH = os.path.join(dir, 'mean.binaryproto')
    steps = 51  # Length of side of the square grid
    net = caffe.Net(MODEL_FILE, PRETRAINED, caffe.TRAIN)
    lmdb_env = lmdb.open(DB_PATH)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    mean_image_binary = open(MEAN_FILE_PATH, 'rb').read()
    blob = caffe.proto.caffe_pb2.BlobProto()
    blob.ParseFromString(mean_image_binary)
    mean_image = np.array(caffe.io.blobproto_to_array(blob))
    mean_image = np.reshape(mean_image, newshape=(3, 32, 32))
    caffe.set_mode_gpu()
    error_category = np.zeros(shape=(10, 1))
    count = 0
    correct = 0
    wrongly_classified_images = []
    wrongly_classified_labels = []
    for key, value in lmdb_cursor:
        count = count + 1
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum)
        image = (image - mean_image)
        out = net.forward(data=np.asarray([image]))
        predicted_label = out['prob'].argmax()
        if label != predicted_label:
            error_category[label] += 1
            wrongly_classified_images.append(image)
            wrongly_classified_labels.append(label)

        else:
            correct += 1

        if count % 100 == 0:
            print(str(count) + ' completed')

    print(error_category)
    lmdb_env.close()

    print(
        str(correct) + " out of " + str(count) + " were classified correctly")

    # Get the normalized Gaussian vectors for the total number of parameters
    # Vector count is currently 2 because only two vectors are needed in x and y directions
    vector_count = 2

    # Calculate the total parameter count in the network
    # Calculate the Frobenius norm/Euclidean norm of the network
    # Square root of sum of absolute squares of all the weights in the network
    param_count, euclidean_norm = calculate_param_count(net)
    print(param_count)
    gaussian_vec = get_gaussian_vector(param_count, vector_count)

    # Normalize the Gaussian Vector with the norm
    vectors_norms = calculate_norm(gaussian_vec)
    normalized_vectors = np.divide(
        gaussian_vec, np.reshape(vectors_norms, [len(vectors_norms), 1]))

    # Multiply the vectors with the norm of the Network
    directional_vectors = np.multiply(normalized_vectors, euclidean_norm)

    # save the matrix of directional_vectors
    # save the numpy array
    np.save('directional_vectors', directional_vectors)

    loss, accuracy = create_loss_landscape(
        net,
        directional_vectors,
        dir,
        steps=steps,
        wrongly_classified_images=wrongly_classified_images,
        wrongly_classified_labels=wrongly_classified_labels,
        mean_path=MEAN_FILE_PATH)

    x = y = np.linspace(-1.0, 1.0, num=loss.shape[0])
    X, Y = np.meshgrid(x, y)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, loss)
    plt.show()

    np.savetxt(os.path.join(dir, 'test_error_loss.csv'), loss, delimiter=",")
    np.savetxt(os.path.join(dir, 'Test_error_accuracy.csv'),
               accuracy,
               delimiter=",")
    print('Process Completed')
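# --- Hedged illustration, not from the original script -----------------------
# calculate_param_count, get_gaussian_vector and calculate_norm are called in
# main() but are not defined in this snippet. Minimal sketches of what they
# might look like, inferred from the comments above, are given here; the
# original implementations may differ.
def calculate_param_count(net):
    # total number of weights and their Euclidean (Frobenius) norm
    count, squared_sum = 0, 0.0
    for layer_name in net.params:
        for blob in net.params[layer_name]:
            count += blob.data.size
            squared_sum += float(np.sum(np.square(blob.data)))
    return count, float(np.sqrt(squared_sum))


def get_gaussian_vector(param_count, vector_count):
    # one random Gaussian direction per row
    return np.random.randn(vector_count, param_count)


def calculate_norm(vectors):
    # Euclidean norm of each direction vector
    return np.linalg.norm(vectors, axis=1)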
Example #17
def rrpn_test_BOT(day, exp, model_name, mode):

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    NETS = {'rrpn': ('VGG16', day + "/" + exp + "/" + model_name)}

    # rrpn only
    prototxt = os.path.join(cfg.RRPN_MODELS_DIR, NETS['rrpn'][0],
                            'faster_rcnn_end2end', 'test.prototxt')

    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS['rrpn'][1])

    print caffemodel

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    caffe.set_mode_gpu()
    caffe.set_device(0)
    cfg.GPU_ID = 0

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    #im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    #for i in xrange(2):
    # 	_, _= rotation_demo.r_im_detect(net, im)

    im_names = []  #['IMG_0030.JPG','IMG_0059.JPG','IMG_0063.JPG']

    roidb = get_bot_img(mode)

    gt_boxes = []

    for rdb in roidb:
        im_names.append(rdb['image'])
        gt_boxes.append([0, 0, 0, 0, 0])

    if not os.path.isdir("./result/" + day + "/"):
        os.mkdir("./result/" + day + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/"):
        os.mkdir("./result/" + day + "/" + exp + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/" + mode + "/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                 mode + "/")

    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/" + mode + "_origin/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                 mode + "_origin/")

    for im_idx in range(len(im_names)):
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_names[im_idx])
        print "model: ", model_name
        #print im_names[im_idx], gt_boxes[im_idx]
        rotation_demo.demo(
            net, im_names[im_idx], gt_boxes[im_idx], "./result/" + day + "/" +
            exp + "/" + model_name + "/" + mode + "/", "./result/" + day +
            "/" + exp + "/" + model_name + "/" + mode + "_origin/")
import os
caffe_root = '../../caffe-portraitseg/'
import sys
sys.path.insert(0, caffe_root + 'python')

import numpy as np
from matplotlib import pyplot as plt
import caffe
caffe.set_mode_cpu()

MODEL_FILE = '../our_models/deploy_3channels.prototxt'
PRETRAINED = '../our_models/bgr.caffemodel'
net = caffe.Net(MODEL_FILE, PRETRAINED, caffe.TEST)

net.blobs['data'].reshape(1, 3, 800, 600)


import scipy.io as scio

testlistPath = '../../data/testlist.mat'
outputPath = './Output_PortraitFCN/'

testlist = scio.loadmat(testlistPath)['testlist'][0]

for i in range(len(testlist)):
	if os.path.exists('../../data/portraitFCN_data/%05d.mat' % testlist[i]) is True:
		image = scio.loadmat('../../data/portraitFCN_data/%05d.mat' % testlist[i])['img']
		image = image.transpose((2,0,1))
		net.blobs['data'].data[...] = image
		output = net.forward() 
		res = output['upscore']
import numpy as np
import ROOT as rt
import lmdb

caffe.set_mode_gpu()

deploy_prototxt = "/home/taritree/software/caffe/models/lenet_uboone/lenet.prototxt"
test_train_prototxt = "/home/taritree/software/caffe/models/lenet_uboone/lenet_train_test.prototxt"
model = "/home/taritree/software/caffe/models/lenet_uboone/lenet_rmsprop_iter_10000.caffemodel"
weights = "/home/taritree/software/caffe/models/lenet_uboone/lenet_rmsprop_iter_10000.solverstate"
train_data = "/home/taritree/working/larbys/staged_data/uboone_singlep_train.db"
validate_data = "/home/taritree/working/larbys/staged_data/uboone_singlep_validate.db"

prototxt = test_train_prototxt
#prototxt = deploy_prototxt
net = caffe.Net(prototxt, model, caffe.TEST)

lmdb_name = validate_data
lmdb_env = lmdb.open(lmdb_name, readonly=True)
lmdb_txn = lmdb_env.begin()

cursor = lmdb_txn.cursor()

batchsize = 100
nbatches = 10

misslist = []

out = rt.TFile("out_netanalysis.root", "RECREATE")
herrmat = rt.TH2D("herrmat", ";truth label;decision label", 4, 0, 4, 4, 0, 4)
hclassacc = rt.TH1D("hclassacc", ";truth label;accuracy", 4, 0, 4)
Example #20
    x=getarry(net,data)
    for i in xrange(0,m):
        sock.send(struct.pack('f',x[i]))

    sock.close()
        
def getarry(net,choseImage):
    net.blobs['data'].data[...]  = transformer.preprocess('data', caffe.io.load_image(choseImage))
    out=net.forward()
    pool5Data=net.blobs['fc7'].data.reshape(1,-1)
    return pool5Data.reshape(-1)

caffe_root='/home/sjq/caffe/'
choseImage='123'
caffe.set_mode_gpu()
net = caffe.Net(caffe_root + 'models/bvlc_alexnet/deploy.prototxt',caffe_root + 'models/bvlc_alexnet/bvlc_alexnet.caffemodel',caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})  
transformer.set_transpose('data', (2,0,1))  
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel  
transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]  
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB  
 # set net to batch size of 1 (i.e., the input is a single image)
net.blobs['data'].reshape(1,3,227,227)  

s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(('127.0.0.1',9998))
s.listen(5)
while True:
    sock,addr=s.accept()
    t=threading.Thread(target=tcplink,args=(sock,addr,net))
    t.start()
def darknet2caffe(cfgfile, weightfile, protofile, caffemodel):
    net_info = cfg2prototxt(cfgfile)
    save_prototxt(net_info , protofile, region=False)

    net = caffe.Net(protofile, caffe.TEST)
    params = net.params

    blocks = parse_cfg(cfgfile)

    #Open the weights file
    fp = open(weightfile, "rb")

    #The first 5 int32 values are header information
    # 1. Major version number
    # 2. Minor version number
    # 3. Subversion number
    # 4-5. Images seen (a 64-bit counter, hence two int32 words)
    header = np.fromfile(fp, dtype = np.int32, count = 5)

    #fp = open(weightfile, 'rb')
    #header = np.fromfile(fp, count=5, dtype=np.int32)
    #header = np.ndarray(shape=(5,),dtype='int32',buffer=fp.read(20))
    #print(header)
    buf = np.fromfile(fp, dtype = np.float32)
    #print(buf)
    fp.close()

    layers = []
    layer_id = 1
    start = 0
    for block in blocks:
        if start >= buf.size:
            break

        if block['type'] == 'net':
            continue
        elif block['type'] == 'convolutional':
            batch_normalize = int(block['batch_normalize'])
            if block.has_key('name'):
                conv_layer_name = block['name']
                bn_layer_name = '%s-bn' % block['name']
                scale_layer_name = '%s-scale' % block['name']
            else:
                conv_layer_name = 'layer%d-conv' % layer_id
                bn_layer_name = 'layer%d-bn' % layer_id
                scale_layer_name = 'layer%d-scale' % layer_id

            if batch_normalize:
                start = load_conv_bn2caffe(buf, start, params[conv_layer_name], params[bn_layer_name], params[scale_layer_name])
            else:
                start = load_conv2caffe(buf, start, params[conv_layer_name])
            layer_id = layer_id+1
        elif block['type'] == 'depthwise_convolutional':
            batch_normalize = int(block['batch_normalize'])
            if block.has_key('name'):
                conv_layer_name = block['name']
                bn_layer_name = '%s-bn' % block['name']
                scale_layer_name = '%s-scale' % block['name']
            else:
                conv_layer_name = 'layer%d-dwconv' % layer_id
                bn_layer_name = 'layer%d-bn' % layer_id
                scale_layer_name = 'layer%d-scale' % layer_id

            if batch_normalize:
                start = load_conv_bn2caffe(buf, start, params[conv_layer_name], params[bn_layer_name], params[scale_layer_name])
            else:
                start = load_conv2caffe(buf, start, params[conv_layer_name])
            layer_id = layer_id+1
        elif block['type'] == 'connected':
            if block.has_key('name'):
                fc_layer_name = block['name']
            else:
                fc_layer_name = 'layer%d-fc' % layer_id
            start = load_fc2caffe(buf, start, params[fc_layer_name])
            layer_id = layer_id+1
        elif block['type'] == 'maxpool':
            layer_id = layer_id+1
        elif block['type'] == 'avgpool':
            layer_id = layer_id+1
        elif block['type'] == 'region':
            layer_id = layer_id + 1
        elif block['type'] == 'route':
            layer_id = layer_id + 1
        elif block['type'] == 'shortcut':
            layer_id = layer_id + 1
        elif block['type'] == 'softmax':
            layer_id = layer_id + 1
        elif block['type'] == 'cost':
            layer_id = layer_id + 1
        elif block['type'] == 'upsample':
            layer_id = layer_id + 1
        else:
            print('unknown layer type %s ' % block['type'])
            layer_id = layer_id + 1
    print('save prototxt to %s' % protofile)
    save_prototxt(net_info , protofile, region=True)
    print('save caffemodel to %s' % caffemodel)
    net.save(caffemodel)
Example #22
    draw_boxes(true_boxes_max_avg, imgs, im, info1[0] + '_9.jpg')

    #===================
    #plt.savefig('heatmap/'+image_name.split('/')[-1])
    # draw the detected face boxes on the image

    plt.close()
    return out['prob'][0, map_idx]


if __name__ == "__main__":
    if not os.path.isfile(model_weight_fc):
        net_fc = convert_full_conv(model_define, model_weight, model_define_fc,
                                   model_weight_fc)
    else:
        net_fc = caffe.Net(model_define_fc, model_weight_fc, caffe.TEST)
    net_vf = caffe.Net(model_define, model_weight, caffe.TEST)

    #logging module
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M',
        filename='batch_detect.log',
        filemode='w')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    logging.info('begin...')
Example #23
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x" + str(defRes)
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.25
params["scale_number"] = len(scales)
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["model_folder"] = dir_path + "/../../../models/"
openpose = OpenPose(params)
caffe.set_mode_gpu()
caffe.set_device(0)
nets = []
for scale in scales:
    nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST))
print("Net loaded")

# Test Function
first_run = True


def func(frame):

    # Get image processed for network, and scaled image
    imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales)

    # Reshape
    global first_run
    if first_run:
        for i in range(0, len(scales)):
def main():
	sys.path.append('/imagenetdb3/abearman/caffe/python/')
	print "Importing caffe ..."
	import caffe

	val_file = open(VAL_FILE)
	ids = [line[:-1] for line in val_file]
	#from random import shuffle
	#shuffle(ids)
	
	overall_acc = 0
	accuracies = []

	# Load net
	net = caffe.Net(PROTOTEXT_FILE, CAFFE_MODEL, caffe.TEST)
	caffe.set_mode_gpu()
	caffe.set_device(1)

	num_intersection = {}; 
	num_union = {}; 
	current_iou = {}

	settings = ['regular','addbg','fixbg','removeclasses','fixboth'];  #,'removebg'
	#settings = ['addbg','regular'];
	for s in settings:
		num_intersection[s] = [0.0]*21;
		num_union[s] = [0.0]*21

	start_t = time.time();
	time_segm = 0;
	time_other = 0;
	for i in range(len(ids)):
		if (time.time() - start_t) > 10:
			print "iter:", i,"/",len(ids),"time_segm:",time_segm,"time_other:",time_other
			for s in settings:
				print '---'
				print '     ',s
				print current_iou[s]
				print "Mean: ", mean(current_iou[s])
			print ""
			print ""
			start_t = time.time()
			
		tt = time.time();
		image_path = IMAGES_DIR + ids[i] + '.jpg'
		gt_path = GT_DIR + ids[i] + '.png'
		
		obj_path = ''
		if i == 0:
			print 'obj_path:',obj_path

		predicted_segmentation = segment(image_path, net,obj_path)	
		time_segm += time.time()-tt;

		tt = time.time()
		for s in settings:
			bAddBg = False
			bRemoveBg = False
			bRemoveClasses = False

			if s == 'regular':
				pass
			elif s == 'addbg':
				bAddBg = True
			elif s == 'removebg':
				bRemoveBg = True
			elif s == 'fixbg':
				bAddBg = True
				bRemoveBg = True
			elif s == 'removeclasses':
				bRemoveClasses = True
			elif s == 'fixboth':
				bAddBg = True
				bRemoveBg = True
				bRemoveClasses = True
			else:
				raise Exception('unknown type')

			(ni,nu) = computeIOU(predicted_segmentation,gt_path,bAddBg,bRemoveBg,bRemoveClasses);
			num_intersection[s] = np.add(num_intersection[s],ni)
			num_union[s] = np.add(num_union[s],nu)
			current_iou[s] = [float(num_intersection[s][i])/num_union[s][i] if num_union[s][i] > 0 else 0 for i in range(len(num_union[s]))];

		time_other += time.time()-tt
		
	for s in settings:
		print "Mean iou per class: ", repr(current_iou[s])
		print "Mean iou: ", mean(current_iou[s])
Example #25
sys.path.append('/usr/local/lib/python2.7/site-packages')
# Make sure that caffe is on the python path:
caffe_root = '/SegNet/caffe-segnet/'
sys.path.insert(0, caffe_root + 'python')
import caffe

# Import arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--weights', type=str, required=True)
parser.add_argument('--colours', type=str, required=True)
args = parser.parse_args()

net = caffe.Net(args.model,
                args.weights,
                caffe.TEST)

caffe.set_mode_gpu()

input_shape = net.blobs['data'].data.shape
output_shape = net.blobs['argmax'].data.shape

label_colours = cv2.imread(args.colours).astype(np.uint8)

cv2.namedWindow("Input")
cv2.namedWindow("SegNet")

cap = cv2.VideoCapture(0) # Change this to your webcam ID, or file name for your video file

if cap.isOpened(): # try to get the first frame
Example #26
def rrpn_test_MSRA(day, exp, model_name, mode):

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    NETS = {'rrpn': ('VGG16', day + "/" + exp + "/" + model_name)}

    # rrpn only
    prototxt = os.path.join(cfg.RRPN_MODELS_DIR, NETS['rrpn'][0],
                            'faster_rcnn_end2end', 'test.prototxt')

    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS['rrpn'][1])

    print caffemodel

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    caffe.set_mode_gpu()
    caffe.set_device(0)
    cfg.GPU_ID = 0

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    #im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    #for i in xrange(2):
    #	_, _= rotation_demo.r_im_detect(net, im)

    im_names = []  #['IMG_0030.JPG','IMG_0059.JPG','IMG_0063.JPG']

    roidb = test_rroidb(mode)

    gt_boxes = []

    for rdb in roidb:
        im_names.append(rdb['image'])
        gt_boxes.append(rdb['boxes'])

    if not os.path.isdir("./result/" + day + "/"):
        os.mkdir("./result/" + day + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/"):
        os.mkdir("./result/" + day + "/" + exp + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/" + mode + "/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                 mode + "/")

    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/" + mode + "_origin/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                 mode + "_origin/")

    for im_idx in range(len(im_names)):
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_names[im_idx])
        print "model: ", model_name
        #print im_names[im_idx], gt_boxes[im_idx]
        rotation_demo.demo(
            net, im_names[im_idx], gt_boxes[im_idx], "./result/" + day + "/" +
            exp + "/" + model_name + "/" + mode + "/", "./result/" + day +
            "/" + exp + "/" + model_name + "/" + mode + "_origin/")

    precision, recall = MSRA_eval(
        mode,
        "./result/" + day + "/" + exp + "/" + model_name + "/" + mode + "/")

    f_measure = (2 * precision * recall) / (precision + recall)

    if not os.path.isdir("./exper"):
        os.mkdir("./exper")
    if not os.path.isdir("./exper/" + day + "/"):
        os.mkdir("./exper/" + day + "/")
    if not os.path.isdir("./exper/" + day + "/" + exp + "/"):
        os.mkdir("./exper/" + day + "/" + exp + "/")

    PR_file = "./exper/" + day + "/" + exp + "/" + day + "_" + exp + "_" + model_name + "_" + mode
    if not os.path.isfile(PR_file):
        os.mknod(PR_file)

    PR_obj = open(PR_file, "w")

    PR_obj.write("precision: " + str(precision) + "\n" + "recall: " +
                 str(recall) + "\nf-measure: " + str(f_measure))
    PR_obj.close()

    precision_ori, recall_ori = MSRA_eval(
        mode, "./result/" + day + "/" + exp + "/" + model_name + "/" + mode +
        "_origin/")

    f_measure_ori = (2 * precision_ori * recall_ori) / (precision_ori +
                                                        recall_ori)

    if not os.path.isdir("./exper/" + day + "/" + exp + "_origin/"):
        os.mkdir("./exper/" + day + "/" + exp + "_origin/")

    PR_file_ori = "./exper/" + day + "/" + exp + "_origin/" + day + "_" + exp + "_" + model_name + "_" + mode
    if not os.path.isfile(PR_file_ori):
        os.mknod(PR_file_ori)

    PR_obj_ori = open(PR_file_ori, "w")

    PR_obj_ori.write("precision: " + str(precision_ori) + "\n" + "recall: " +
                     str(recall_ori) + "\nf-measure: " + str(f_measure_ori))
    PR_obj_ori.close()

    print "precision: " + str(precision_ori) + "\n" + "recall: " + str(
        recall_ori) + "\nf-measure: " + str(f_measure_ori)
Example #27
def load_model(net_file, path):
    '''Load a Caffe network definition and its trained weights in TEST phase and
    return the caffe.Net object.'''
    import caffe
    net = caffe.Net(net_file, path, caffe.TEST)
    return net
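# Hedged usage sketch for load_model above; the file paths and the 'data' blob
# name are placeholders, not taken from the source.
import numpy as np

net = load_model('deploy.prototxt', 'weights.caffemodel')
net.blobs['data'].data[...] = np.zeros(net.blobs['data'].data.shape,
                                       dtype=np.float32)
out = net.forward()  # dict mapping output blob names to numpy arrays
print(out.keys())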
Example #28
def rrpn_test_ICDAR2015_multiScale(day, exp, model_name, mode):

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    NETS = {'rrpn': ('VGG16', day + "/" + exp + "/" + model_name)}

    # rrpn only
    prototxt = os.path.join(cfg.RRPN_MODELS_DIR, NETS['rrpn'][0],
                            'faster_rcnn_end2end', 'test.prototxt')

    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS['rrpn'][1])

    print caffemodel

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    caffe.set_mode_gpu()
    caffe.set_device(0)
    cfg.GPU_ID = 0

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    #im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    #for i in xrange(2):
    # 	_, _= rotation_demo.r_im_detect(net, im)

    im_names = []  #['IMG_0030.JPG','IMG_0059.JPG','IMG_0063.JPG']

    roidb = test_ICDAR2015(mode)

    gt_boxes = []

    for rdb in roidb:
        im_names.append(rdb['image'])
        gt_boxes.append([0, 0, 0, 0, 0])

    if not os.path.isdir("./result/" + day + "/"):
        os.mkdir("./result/" + day + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/"):
        os.mkdir("./result/" + day + "/" + exp + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/")
    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/" + mode + "/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                 mode + "/")

    if not os.path.isdir("./result/" + day + "/" + exp + "/" + model_name +
                         "/" + mode + "_origin/"):
        os.mkdir("./result/" + day + "/" + exp + "/" + model_name + "/" +
                 mode + "_origin/")

    scales = [900, 1000]
    max_sizes = [1500, 1700]

    for im_idx in range(len(im_names)):
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_names[im_idx])
        print "model: ", model_name
        #print im_names[im_idx], gt_boxes[im_idx]

        result_queue = []

        for i in range(len(scales)):
            cfg.TEST.SCALES = (scales[i], )
            cfg.TEST.MAX_SIZE = max_sizes[i]
            print "result for scale", scales[i], ":"
            result = rotation_demo.demo(
                net, im_names[im_idx], gt_boxes[im_idx], "./result/" + day +
                "/" + exp + "/" + model_name + "/" + mode + "/", "./result/" +
                day + "/" + exp + "/" + model_name + "/" + mode + "_origin/")
            result_queue.extend(result)

        im_file = im_names[im_idx]
        im = cv2.imread(im_file)

        im_height = im.shape[0]
        im_width = im.shape[1]

        dets = det_fusion(result_queue)
        rotation_demo.write_result_ICDAR(
            im_names[im_idx], dets, 0.9, "./result/" + day + "/" + exp + "/" +
            model_name + "/" + mode + "/", im_height, im_width)
    '''
Example #29
sys.path.insert(0, caffe_root + 'python')  
import caffe  
import time;  

count = 0

net_file= '../../faceboxes_deploy.prototxt'  
caffe_model='../../FaceBoxes_1024x1024.caffemodel'  
test_dir = "../../AFW/"

if not os.path.exists(caffe_model):
    print("FaceBoxes_deploy.caffemodel does not exist,")
    print("use merge_bn.py to generate it.")
    exit()
caffe.set_mode_gpu()
net = caffe.Net(net_file,caffe_model,caffe.TEST)  

CLASSES = ('background',
           'face')

transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
#transformer.set_raw_scale('data', 255)
#transformer.set_channel_swap('data', (2, 1, 0))

def preprocess(src):
    img = cv2.resize(src, (1024,1024))
    img = img - 127.5       # zero-center around the mid-gray value
    img = img * 0.007843    # 1/127.5: scale pixels to roughly [-1, 1]
    return img
netKind = 48
quantizeBitNum = 2
stochasticRounding = False

# ==================  caffe  ======================================
caffe_root = '/home/anson/caffe-master/'  # this file is expected to be in {caffe_root}/examples
sys.path.insert(0, caffe_root + 'python')
import caffe

# ==================  load soft quantized params  ======================================
MODEL_FILE = '/home/anson/caffe-master/models/face_' + str(
    netKind) + '_cal/deploy.prototxt'
PRETRAINED = '/home/anson/caffe-master/models/face_' + str(netKind) \
             + '_cal/face_' + str(netKind) + '_cal_train_iter_32500.caffemodel'
caffe.set_mode_gpu()
net = caffe.Net(MODEL_FILE, PRETRAINED, caffe.TEST)
# ============ should be modified for different files ================
if netKind == 12:
    params = ['conv1', 'fc2', 'fc3']
elif netKind == 24:
    params = ['conv1', 'fc2', 'fc3']
elif netKind == 48:
    params = ['conv1', 'conv2', 'fc3', 'fc4']
# =====================================================================
# fc_params = {name: (weights, biases)}
original_params = {
    pr: (net.params[pr][0].data, net.params[pr][1].data)
    for pr in params
}

# ==================  load file to save quantized parameters  =======================
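# --- Hedged illustration, not from the original script -----------------------
# The snippet cuts off here. Below is a minimal sketch of uniform weight
# quantization to `quantizeBitNum` bits with optional stochastic rounding; the
# original script's exact quantization scheme is not visible above, so treat
# this purely as an assumption for illustration.
import numpy as np

def quantize_blob(w, num_bits=2, stochastic=False):
    max_level = 2 ** num_bits - 1                        # e.g. levels 0..3 for 2 bits
    w_min, w_max = float(w.min()), float(w.max())
    scale = (w_max - w_min) / max_level if w_max > w_min else 1.0
    q = (w - w_min) / scale                              # map weights onto [0, max_level]
    if stochastic:
        q = np.floor(q + np.random.uniform(size=q.shape))  # stochastic rounding
    else:
        q = np.round(q)                                  # round to the nearest level
    return np.clip(q, 0, max_level) * scale + w_min      # map back to the weight range

quantized_params = {
    pr: (quantize_blob(w, quantizeBitNum, stochasticRounding), b)
    for pr, (w, b) in original_params.items()
}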