def pre_trained_model(left_im, right_im, reuse=False):
    # The slim-based ResNet graph below is kept for reference only: its tensor
    # names no longer match the available checkpoint, so it stays disabled.
    '''
    from tensorflow.contrib.slim.nets import resnet_v1
    import tensorflow.contrib.slim as slim

    # Create graph
    inputs = tf.placeholder(tf.float32, shape=[64, left_im.shape[1].value,
                                               left_im.shape[2].value, left_im.shape[3].value])
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, end_points = resnet_v1.resnet_v1_50(inputs, is_training=False)
    saver = tf.train.Saver()    

    with tf.Session() as sess:
        pdb.set_trace()
        saver.restore(sess, tf.train.latest_checkpoint('resnet50_ckpt'))
        representation_tensor = sess.graph.get_tensor_by_name('feature_generator/resnet_v1_50/pool5:0')
        # img = ...  # load an image here with shape [1, 224, 224, 3]
        # pdb.set_trace()
        features = sess.run(representation_tensor, {'Placeholder:0': img})
    '''
    import tensornets as nets
    # Under TensorFlow 2.x, run with the v1 compatibility layer:
    #   import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()

    # Siamese towers: both ResNet50 instances share weights via tf.AUTO_REUSE.
    left_feats = nets.ResNet50(left_im, is_training=True, reuse=tf.AUTO_REUSE)
    right_feats = nets.ResNet50(right_im, is_training=True, reuse=tf.AUTO_REUSE)

    # pretrained() only builds the weight-loading assign ops; they must be run
    # inside a tf.Session for the ImageNet weights to actually be loaded.
    left_feats.pretrained()
    right_feats.pretrained()

    # Element-wise L1 distance between the two embeddings.
    merged_features = tf.abs(tf.subtract(left_feats, right_feats))
    return merged_features, left_feats, right_feats
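In tensornets, pretrained() only builds the weight-loading assign ops; nothing is loaded until those ops are run inside a session. Below is a minimal sketch of how pre_trained_model might be driven; the placeholder shapes and the dummy batches are assumptions, and because both towers share variables through tf.AUTO_REUSE a single weight-load call suffices.

import numpy as np
import tensorflow as tf

# Hypothetical placeholders; ResNet50 expects 224x224 RGB inputs.
left_im = tf.placeholder(tf.float32, [None, 224, 224, 3])
right_im = tf.placeholder(tf.float32, [None, 224, 224, 3])

merged, left_feats, right_feats = pre_trained_model(left_im, right_im)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Actually load the ImageNet weights (shared by both towers).
    sess.run(left_feats.pretrained())

    # Dummy batches, only to illustrate the feed structure.
    left_batch = np.random.rand(4, 224, 224, 3).astype(np.float32)
    right_batch = np.random.rand(4, 224, 224, 3).astype(np.float32)
    out = sess.run(merged, {left_im: left_feats.preprocess(left_batch),
                            right_im: right_feats.preprocess(right_batch)})
    print(out.shape)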
Example 2
def train_resnet50(
    image_data_loader: ImageDatasetLoader,
    parameter: ResNet50TrainParameter,
):
    classes = len(image_data_loader.image_classes)
    dataset = image_data_loader.build_dataset()

    inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    outputs = tf.placeholder(tf.float32, [None, classes])
    model = nets.ResNet50(inputs, is_training=True, classes=classes)

    # Use the pre-softmax logits; feeding the model's softmax output into
    # softmax_cross_entropy would apply softmax twice.
    loss = tf.losses.softmax_cross_entropy(outputs, model.logits)
    train = tf.train.GradientDescentOptimizer(parameter.learning_rate) \
        .minimize(loss)

    saver = tf.train.Saver()

    it = dataset.make_one_shot_iterator()
    get_next = it.get_next()
    with tf.Session() as sess:
        sess.run(model.pretrained())
        while True:
            try:
                image_data, label = sess.run(get_next)
            except tf.errors.OutOfRangeError:
                break
            sess.run(train, {inputs: image_data, outputs: label})

        save_path = saver.save(sess, parameter.checkpoint_save_path)
        print('model checkpoint saved to: {}'.format(save_path))
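train_resnet50 only relies on a small interface from its two arguments: image_classes and build_dataset() on the loader, and learning_rate and checkpoint_save_path on the parameter object. The sketch below shows one way those types could look; the directory layout, batch size, and JPEG decoding are assumptions, not part of the original code.

import glob
import os

import tensorflow as tf


class ResNet50TrainParameter:
    def __init__(self, learning_rate=1e-3,
                 checkpoint_save_path='checkpoints/resnet50.ckpt'):
        self.learning_rate = learning_rate
        self.checkpoint_save_path = checkpoint_save_path


class ImageDatasetLoader:
    # Assumes a layout of <image_dir>/<class_name>/*.jpg.
    def __init__(self, image_dir, batch_size=16):
        self.image_dir = image_dir
        self.batch_size = batch_size
        self.image_classes = sorted(
            d for d in os.listdir(image_dir)
            if os.path.isdir(os.path.join(image_dir, d)))

    def build_dataset(self):
        paths, labels = [], []
        for idx, cls in enumerate(self.image_classes):
            for path in glob.glob(os.path.join(self.image_dir, cls, '*.jpg')):
                paths.append(path)
                labels.append(idx)

        def _load(path, label):
            # Decode, resize to the 224x224 input expected by ResNet50,
            # and one-hot encode the label.
            img = tf.image.decode_jpeg(tf.read_file(path), channels=3)
            img = tf.image.resize_images(img, [224, 224])
            return img, tf.one_hot(label, len(self.image_classes))

        return (tf.data.Dataset.from_tensor_slices((paths, labels))
                .map(_load)
                .batch(self.batch_size))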
Example 3
# The snippet begins mid-branch; from the 4032-dimensional feature comment and
# the batch size, the missing first case is presumably NASNetAlarge.
if modelname == 'NASNetAlarge':
    model = nets.NASNetAlarge(inputs)
    featlayer = model.get_outputs()[-4]  # ?x4032
    bs = 250
elif modelname == 'VGG19':
    model = nets.VGG19(inputs)
    featlayer = model.get_outputs()[-4]  # ?x4096
    bs = 100
elif modelname == 'MobileNet25':
    model = nets.MobileNet25(inputs)
    featlayer = model.get_outputs()[-4]
    bs = 2500
elif modelname == 'SqueezeNet':
    model = nets.SqueezeNet(inputs)
    featlayer = model.get_outputs()[-2]
    bs = 1000
elif modelname == 'ResNet50':
    model = nets.ResNet50(inputs)
    featlayer = model.get_outputs()[-3]  # 'avgpool:0', ?x2048
    bs = 500
elif modelname == 'InceptionResNet2':
    model = nets.InceptionResNet2(inputs)
    featlayer = model.get_outputs()[-4]
    bs = 250
else:
    print('Unknown model: ', modelname)
    raise SystemExit

model_pretrained = model.pretrained()

# ind selects which animal to extract features for
try:
    ind = int(sys.argv[2])
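Once the model and featlayer have been chosen, extraction follows the usual tensornets pattern: run the weight-loading ops, preprocess a batch, and fetch the feature tensor. A minimal sketch, assuming a 224x224 input placeholder and using a random stand-in batch:

import numpy as np
import tensorflow as tf

# Stand-in batch; the real script loads images from disk.
image_batch = np.random.rand(bs, 224, 224, 3).astype(np.float32)

with tf.Session() as sess:
    sess.run(model_pretrained)  # run the assign ops that load the ImageNet weights
    feats = sess.run(featlayer, {inputs: model.preprocess(image_batch)})
    print(feats.shape)  # e.g. ?x2048 for ResNet50's 'avgpool' layer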
Example 4
def do_extract(args):
  if args.seed is not None:
    np.random.seed(args.seed)

  FILENAMES = sorted(glob.glob(args.images))
  nimages = len(FILENAMES)
  if nimages == 0:
    print("Did not find any images in '{}".format(args.images))
    return 
  if args.verbose:
    print("Processing {} images".format(nimages))
    
  if args.model == 'NASNetAlarge':
    target_size, crop_size = 331, 331
  else:
    target_size, crop_size = 256, 224
  
  inputs = tf.placeholder(tf.float32, [None, crop_size, crop_size, 3])
  
  if args.model == 'NASNetAlarge':
    model = nets.NASNetAlarge(inputs)
    bs = 250
  elif args.model == 'VGG19':
    model = nets.VGG19(inputs)
    bs = 200
  elif args.model == 'MobileNet25':
    model = nets.MobileNet25(inputs)
    bs = 2500
  elif args.model == 'SqueezeNet':
    model = nets.SqueezeNet(inputs)
    bs = 1000
  elif args.model == 'ResNet50':
    model = nets.ResNet50(inputs)
    bs = 500
  else:
    raise ValueError('unsupported model: {}'.format(args.model))  # should not happen
  
  model_pretrained = model.pretrained()

  if args.batchsize: 
    bs = args.batchsize # overwrite default batchsize

  nchunks = (nimages+bs-1)//bs
  
  PREDS = [] # beware: we store all activations (#images x #classes x sizeof(float))
  with tf.Session() as sess:
      sess.run(model_pretrained)
      
      for i in range(nchunks):  # range (not xrange) so the code runs under Python 3
          images = []
          for filename in FILENAMES[i*bs:(i+1)*bs]:
              img = utils.load_img(filename, target_size=target_size, crop_size=crop_size)
              if args.blur:
                  img = apply_blur(img, args.blur)
              if args.noise:
                  img = apply_noise(img, args.noise)
              if args.dead:
                  img = apply_deadpixels(img, args.dead)
              if args.dark:
                  img = apply_scaletoblack(img, args.dark)
              if args.bright:
                  # Note: reuses the same scaling helper as --dark above; a
                  # brightness-specific transform may have been intended.
                  img = apply_scaletoblack(img, args.bright)
              if args.geometry:
                  img = apply_geometry(img, args.geometry)

              images.append( img.squeeze() )

          images = model.preprocess(np.asarray(images))

          preds = sess.run(model, {inputs: images})
          PREDS.extend(preds)

          if args.verbose:
            print('Processed chunk {} of {}'.format(i + 1, nchunks))
            print('Most recent prediction:', utils.decode_predictions(preds, top=1)[0])

      PREDS = np.asarray(PREDS)
      if args.output:
          np.savetxt(args.output, PREDS.max(axis=1), fmt='%.12f')
      if args.rawoutput:
          np.savez_compressed(args.rawoutput, PREDS)
      if args.labeloutput:
          np.savetxt(args.labeloutput, PREDS.argmax(axis=1), fmt='%d')

  if args.verbose:
    print("Done.")
Example 5
if __name__ == '__main__':
    # processing cmd line args
    parser = argparse.ArgumentParser('generate layer projections')
    g1 = parser.add_argument_group('computation options')
    g1.add_argument('--layer_no', type=int, default=0)
    g1.add_argument('--batchsize', type=int, default=12)
    args = parser.parse_args()

    # Filter datasets
    (Xtrain, Ytrain), (Xtest, Ytest) = tf.keras.datasets.cifar10.load_data()
    Xtrain, Ytrain = filter_photo(Xtrain, Ytrain)
    max_data = Xtrain.shape[0]

    # Configure
    inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    model = nets.ResNet50(inputs, is_training=False)
    load_model_weights = model.pretrained()

    dir_str = 'pickle_data/layer_{layer_no}'.format(layer_no=args.layer_no)
    os.makedirs(dir_str, exist_ok=True)  # makedirs returns None, so there is nothing to keep

    # Filter layers
    # Despite the name, this collects the *positions* of tensors whose name
    # contains 'relu', not the names themselves.
    tensor_names = []
    with open('pickle_data/tensor_names.pickle', 'rb') as ph:
        tensor_names = [
            p for p, name in enumerate(pickle.load(ph)) if 'relu' in name
        ]

    if args.layer_no == 0:  # use ==, not 'is', for integer comparison
        print("Batch Size: ", args.batchsize)
        print("Max Data: ", max_data)