Example #1
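  # Extract 10-crop (or single-crop) features from GoogLeNet ('pool5/7x7_s1')
  # and VGG ('fc6') for every image in a dataset file list.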
  net_google = caffe.Classifier(MODEL_GOOGLE_DEPLOY_FILE, MODEL_GOOGLE_WEIGHT_FILE,
                                mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))
  net_vgg = caffe.Classifier(MODEL_VGG_DEPLOY_FILE, MODEL_VGG_WEIGHT_FILE,
                             mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))

  src_google = net_google.blobs['data']
  src_google.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])
  src_vgg = net_vgg.blobs['data']
  src_vgg.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])

  filenames = [entry.strip().split(' ')[0] for entry in open('%s/%s' % (DATASET_ROOT, DATASET_LIST))]
  feat_vgg   = np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_VGG), dtype=np.float32))
  feat_google= np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_GOOGLE), dtype=np.float32))
  #import pdb; pdb.set_trace()

  for n, fname in enumerate(filenames):
    try:
      im = utils.load_image( '%s/jpg/%s' %(DATASET_ROOT, fname) )
      im = im.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
      im = utils.preprocess(net_google, im)
    except Exception:
      print 'error: filename: ', fname
      continue  # skip unreadable images instead of reusing the previous one
    if FEATURE_JITTER == 10:
      im_jittered = utils.oversample(im, MODEL_INPUT_SIZE)
      src_google.data[:], src_vgg.data[:] = im_jittered, im_jittered
    else:
      src_google.data[:], src_vgg.data[:] = im[:], im[:]

    net_google.forward(end='pool5/7x7_s1')
    net_vgg.forward(end='fc6')
    feat_vgg[n] = net_vgg.blobs['fc6'].data
    feat_google[n] = np.squeeze(net_google.blobs['pool5/7x7_s1'].data)

    if (n+1) % 10 == 0:
      print 'End of ', n+1
      sys.stdout.flush()
Example #2
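# Visualize VGG activations by deconvolution: run forward to a target blob, copy
# its activations into the mirrored deconvolution path, then run that path back
# to 'deconv1_1' to reconstruct an image.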
#import pdb; pdb.set_trace()
caffe.set_mode_cpu()
net_vgg = caffe.Classifier(MODEL_VGG_DEPLOY_FILE, MODEL_VGG_WEIGHT_FILE,
                           mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))
src_vgg = net_vgg.blobs['data']
src_vgg.reshape(1, 3, MODEL_ORIGINAL_INPUT_SIZE[0], MODEL_ORIGINAL_INPUT_SIZE[1])

filenames = [entry.strip().split(' ')[0] for entry in open('%s/%s' % (DATASET_ROOT, DATASET_LIST))]

end_blob_name = ['pool1', 'pool2', 'pool3', 'pool4', 'pool5', 'fc6_conv', 'relu6_conv']
copy_to_blob_name = ['deconv2_1', 'deconv3_1', 'deconv4_1', 'deconv5_1', 'fc6_deconv', 'relu6_deconv', 'fc7_deconv']
decon_layer_name = ['unpool1', 'unpool2', 'unpool3', 'unpool4', 'unpool5', 'fc6_deconv', 'relu6_deconv']
iter_idx = [0, 1, 2, 3, 4, 5, 6]

print 'Start deconvolution'
for file_id, filename in enumerate(filenames):
  im = utils.load_image( '%s/%s' %(DATASET_ROOT, filename) )
  im = im.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
  im = utils.preprocess(net_vgg, im)
  src_vgg.data[:] = im[:]
  print 'Done load image'

  for layer_idx, end_blob, copy_to, deconv_layer in zip(iter_idx, end_blob_name, copy_to_blob_name, decon_layer_name):
    net_vgg.forward(end=end_blob)
    net_vgg.blobs[copy_to].data[...] = np.copy(net_vgg.blobs[end_blob].data)
    net_vgg.forward(start=deconv_layer)
  
    recon = np.copy(net_vgg.blobs['deconv1_1'].data[0])
    recon = utils.deprocess(net_vgg, recon)
    min_val, max_val = recon.flatten().min(), recon.flatten().max()
    print "{}, layer {}, dim: {}={}, mean: {}, min_val: {}, max_val: {}".format( \
      filename, end_blob, net_vgg.blobs[end_blob].data[0].shape, \
      np.prod(net_vgg.blobs[end_blob].data[0].shape), \
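  # Timed 10-crop feature extraction over a labeled validation list, with
  # per-stage timing (load / resize / jitter) and top-1/top-5 hit counting.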
  net_google = caffe.Classifier(MODEL_GOOGLE_DEPLOY_FILE, MODEL_GOOGLE_WEIGHT_FILE,
                                mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))
  net_vgg = caffe.Classifier(MODEL_VGG_DEPLOY_FILE, MODEL_VGG_WEIGHT_FILE,
                             mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))

  src_google = net_google.blobs['data']
  src_google.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])
  src_vgg = net_vgg.blobs['data']
  src_vgg.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])

  filenames = ['%s/val/%s' % (DATASET_ROOT, entry.strip().split(' ')[0]) for entry in open(DATASET_INPUT_LIST)]
  labels = [entry.strip().split(' ')[1] for entry in open(DATASET_INPUT_LIST)]

  hit_count, hit5_count, tic_global = 0, 0, datetime.datetime.now()
  for n, fname in enumerate(filenames):
    try:
      tic_load = datetime.datetime.now()
      im = utils.load_image( fname )
      toc_load = datetime.datetime.now(); elapsed_load = toc_load - tic_load
      tic_resize = datetime.datetime.now()
      im = im.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
      toc_resize = datetime.datetime.now(); elapsed_resize = toc_resize - tic_resize
      im = utils.preprocess(net_google, im)
      tic_jittering = datetime.datetime.now()
    except Exception:
      print 'error: filename: ', fname
      continue  # skip unreadable images; the timing variables below would otherwise be undefined
    if FEATURE_JITTER == 10:
      im_jittered = utils.oversample(im, MODEL_INPUT_SIZE)
      src_google.data[:] = im_jittered
      src_vgg.data[:] = im_jittered
    else:
      src_google.data[0], src_vgg.data[0] = im[:], im[:]
    toc_jittering = datetime.datetime.now(); elapsed_jittering = toc_jittering - tic_jittering
Example #4
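  # GPU-mode pair scoring: preprocess a query/reference image pair with 10-crop
  # oversampling (or a single resized crop) and feed both through GoogLeNet.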
  #import pdb; pdb.set_trace()
  caffe.set_device(0)
  caffe.set_mode_gpu()
  net = caffe.Classifier(
    MODEL_GOOGLE_DEPLOY_FILE, MODEL_GOOGLE_WEIGHT_FILE,
    mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))

  src = net.blobs['data']
  src.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])

  for n, fname in enumerate(entries):
    try:
      query_filename = fname[0] 
      ref_filename = fname[1]
      label = int(fname[2])
      inputs_q  = utils.load_image( os.path.join(prefix, query_filename) )
      inputs_ref= utils.load_image( os.path.join(prefix, ref_filename) )

      if FEATURE_JITTER == 10:
        inputs_q  = inputs_q.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
        inputs_ref= inputs_ref.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
        inputs_q  = utils.preprocess(net, inputs_q)
        inputs_ref= utils.preprocess(net, inputs_ref)
        inputs_q  = utils.oversample(inputs_q, MODEL_INPUT_SIZE)
        inputs_ref= utils.oversample(inputs_ref, MODEL_INPUT_SIZE)
      else:
        inputs_q  = inputs_q.resize( MODEL_INPUT_SIZE, PIL.Image.ANTIALIAS )
        inputs_ref= inputs_ref.resize( MODEL_INPUT_SIZE, PIL.Image.ANTIALIAS )
        inputs_q  = utils.preprocess(net, inputs_q)
        inputs_ref= utils.preprocess(net, inputs_ref)
        inputs_q  = inputs_q[np.newaxis,:,:,:]
Example #5
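# Write image pairs into an LMDB: each Datum stores the two images stacked
# channel-wise (6 x H x W, BGR) together with the pair's integer label.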
if __name__ == '__main__':
  #import pdb; pdb.set_trace()
  entries=[entry.strip().split(' ') for entry in open('%s/PAIRS/CDVS_1M_PAIRS/%s' % (DATASET_ROOT, DATASET_LIST))]
  #import pdb; pdb.set_trace()
  print 'Load data from: ', DATASET_LIST

  LMDB_NAME = '%s/%s' % (LMDB_PATH, LMDB_FILENAME)
  os.system('rm -rf %s' % LMDB_NAME)
  env = lmdb.open(LMDB_NAME, map_size=int(1e12))

  #import pdb; pdb.set_trace()
  n = 0
  for entry in entries:
    try:
      docid_i, docid_j, dist, label = entry[0], entry[1], entry[2], entry[3]
      im_i = utils.load_image( '%s/%s' %(DATASET_ROOT, docid_i))
      im_i = im_i.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
      im_i = np.uint8(im_i)
      im_i = np.rollaxis(im_i, 2)[::-1]  # HWC RGB -> CHW BGR
      im_j = utils.load_image( '%s/%s' %(DATASET_ROOT, docid_j))
      im_j = im_j.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
      im_j = np.uint8(im_j)
      im_j = np.rollaxis(im_j, 2)[::-1]  # HWC RGB -> CHW BGR
      im = np.vstack((im_i, im_j))  # stack along channels: (6, H, W)
      datum = caffe.proto.caffe_pb2.Datum()
      datum.channels = im.shape[0]
      datum.height = im.shape[1]
      datum.width = im.shape[2]
      datum.data = im.tobytes()
      datum.label = int(label)
      str_id = '{:0>10d}'.format(n)
Example #6
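  # Sharded feature extraction: loop over five list files ('<SET_ID>_00.txt' ...
  # '<SET_ID>_04.txt') and extract VGG fc6 + GoogLeNet pool5 features per shard.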
  src_google = net_google.blobs['data']
  src_google.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])
  src_vgg = net_vgg.blobs['data']
  src_vgg.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])

  #import pdb; pdb.set_trace()
  for nn in range(5):
    DATASET_LIST = '%s_%02d.txt' % (SET_ID, nn)
    print 'Start ', DATASET_LIST
    filenames = [entry.strip().split(' ')[0] for entry in open('%s/%s' % (DATASET_ROOT, DATASET_LIST))]
    feat_vgg   = np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_VGG), dtype=np.float32))
    feat_google= np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_GOOGLE), dtype=np.float32))

    for n, fname in enumerate(filenames):
      try:
        im = utils.load_image( '%s/%s/%s' %(DATASET_ROOT, SET_ID, fname) )
        im = im.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
        im = utils.preprocess(net_google, im)
      except Exception:
        print 'ERROR: filename: ', fname
        traceback.print_exc(file=sys.stdout)
        sys.exit(-1)
      if FEATURE_JITTER == 10:
        im_jittered = utils.oversample(im, MODEL_INPUT_SIZE)
        src_google.data[:], src_vgg.data[:] = im_jittered, im_jittered
      else:
        src_google.data[:], src_vgg.data[:] = im[:], im[:]

      net_google.forward(end='pool5/7x7_s1')
      net_vgg.forward(end='fc6')
      feat_vgg[n] = net_vgg.blobs['fc6'].data
      feat_google[n] = np.squeeze(net_google.blobs['pool5/7x7_s1'].data)
Example #7
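    # Sharded VGG/GoogLeNet feature extraction as in Example #6; note that
    # net_google and src_google must be set up earlier in the surrounding script.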
    src_vgg = net_vgg.blobs["data"]
    src_vgg.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])

    #import pdb; pdb.set_trace()
    for nn in range(5):
        DATASET_LIST = "%s_%02d.txt" % (SET_ID, nn)
        print "Start ", DATASET_LIST
        filenames = ["%s" % entry.strip().split(" ")[0] for entry in open("%s/%s" % (DATASET_ROOT, DATASET_LIST))]
        feat_vgg = np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_VGG), dtype=np.float32))
        feat_google = np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_GOOGLE), dtype=np.float32))

        for n, fname in enumerate(filenames):
            try:
                im = utils.load_image("%s/%s/%s" % (DATASET_ROOT, SET_ID, fname))
                im = im.resize(MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS)
                im = utils.preprocess(net_google, im)
            except Exception:
                print "ERROR: filename: ", fname
                traceback.print_exc(file=sys.stdout)
                sys.exit(-1)
            if FEATURE_JITTER == 10:
                im_jittered = utils.oversample(im, MODEL_INPUT_SIZE)
                src_google.data[:], src_vgg.data[:] = im_jittered, im_jittered
            else:
                src_google.data[:], src_vgg.data[:] = im[:], im[:]

            net_google.forward(end="pool5/7x7_s1")
            net_vgg.forward(end="fc6")
            feat_vgg[n] = net_vgg.blobs["fc6"].data
Example #8
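  # 10-crop GoogLeNet/VGG feature extraction over the Oxford Buildings dataset
  # ('oxbuild_images'), logging progress after every image.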
  net_google = caffe.Classifier(MODEL_GOOGLE_DEPLOY_FILE, MODEL_GOOGLE_WEIGHT_FILE,
                                mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))
  net_vgg = caffe.Classifier(MODEL_VGG_DEPLOY_FILE, MODEL_VGG_WEIGHT_FILE,
                             mean=MODEL_MEAN_VALUE, channel_swap=(2, 1, 0))

  src_google = net_google.blobs['data']
  src_google.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])
  src_vgg = net_vgg.blobs['data']
  src_vgg.reshape(FEATURE_JITTER, 3, MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1])

  filenames = [entry.strip() for entry in open('%s/%s' % (DATASET_ROOT, DATASET_LIST))]
  feat_vgg   = np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_VGG), dtype=np.float32))
  feat_google= np.squeeze(np.zeros((len(filenames), FEATURE_JITTER, FEATURE_DIM_GOOGLE), dtype=np.float32))
  #import pdb; pdb.set_trace()

  for n, fname in enumerate(filenames):
    try:
      im = utils.load_image( '%s/oxbuild_images/%s' %(DATASET_ROOT, fname) )
      im = im.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
      im = utils.preprocess(net_google, im)
    except Exception:
      print 'error: filename: ', fname
      continue  # skip unreadable images instead of reusing the previous one
    if FEATURE_JITTER == 10:
      im_jittered = utils.oversample(im, MODEL_INPUT_SIZE)
      src_google.data[:], src_vgg.data[:] = im_jittered, im_jittered
    else:
      src_google.data[:], src_vgg.data[:] = im[:], im[:]

    net_google.forward(end='pool5/7x7_s1')
    net_vgg.forward(end='fc6')
    feat_vgg[n] = net_vgg.blobs['fc6'].data
    feat_google[n] = np.squeeze(net_google.blobs['pool5/7x7_s1'].data)

    if (n+1) % 1 == 0:  # a modulus of 1 logs every image; raise it to log less often
      print 'End of ', n+1
      sys.stdout.flush()
Example #9
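    # Build filename<->index lookup tables, then binarize concatenated 10-crop
    # VGG fc6 and GoogLeNet pool5 features into packed binary codes.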
      fname__ = "/".join(fname_)
      dic_ref[n], dic_idx[fname__] = fname__, n

    #import pdb; pdb.set_trace()
    if len(dic_ref) != len(dic_idx):
      print 'len(dic_ref) and len(dic_idx) mismatched'
      sys.exit(-1)

    database['dic_ref'] = dic_ref
    database['dic_idx'] = dic_idx

    #import pdb; pdb.set_trace()
    for n, fname in enumerate(filenames):
      if n >= NUM_SAMPLE: break
      try:
        im = utils.load_image( '%s' %(fname) )
        im = im.resize( MODEL_ORIGINAL_INPUT_SIZE, PIL.Image.ANTIALIAS )
        im = utils.preprocess(net_google, im)
      except Exception:
        print 'ERROR: filename: ', fname
        continue  # skip unreadable images instead of reusing the previous one

      im_jittered = utils.oversample(im, MODEL_INPUT_SIZE)
      src_google.data[:], src_vgg.data[:] = im_jittered, im_jittered

      net_google.forward(end='pool5/7x7_s1')
      net_vgg.forward(end='fc6')
      feat_vgg = np.reshape(net_vgg.blobs['fc6'].data, (1, 10*4096))
      feat_google = np.reshape(np.squeeze(net_google.blobs['pool5/7x7_s1'].data), (1, 10*1024))
      feat = np.hstack((feat_vgg, feat_google))
      # sign-binarize the concatenated feature and pack 8 bits per byte; widen to
      # uint16 so two adjacent bytes can later be fused into one 16-bit code
      fea = (np.packbits(np.uint8(feat > 0), axis=1)).astype(np.uint16)
      fea_shift = fea << 8