Example #1
def test_all_gradients(init_img, net, all_target_blob_names, targets, target_data_list):
    # Set initial value and reshape net
    deepart.set_data(net, init_img)
    x0 = np.ravel(init_img).astype(np.float64)

    dx = 1e-2
    grad_err_thres = 1e-3
    test_count = 100

    input_shape = (1, 30, 40, 50)
    target_data = np.random.normal(size=input_shape)
    test_gradient(
        deepart.content_grad, input_shape, dx, grad_err_thres, test_count,
        target_data
    )
    target_data_gram = np.random.normal(size=(30, 30))
    test_gradient(
        deepart.style_grad, input_shape, dx, grad_err_thres, test_count,
        target_data_gram
    )

    target_data_list = gen_test_target_data(net, targets)
    test_gradient(
        deepart.objective_func, x0.shape, dx, grad_err_thres, test_count,
        net, all_target_blob_names, targets, target_data_list
    )
Example #2
def test_all_gradients(init_img, net, all_target_blob_names, targets,
                       target_data_list):
    # Set initial value and reshape net
    deepart.set_data(net, init_img)
    x0 = np.ravel(init_img).astype(np.float64)

    dx = 1e-2
    grad_err_thres = 1e-3
    test_count = 100

    input_shape = (1, 30, 40, 50)
    target_data = np.random.normal(size=input_shape)
    test_gradient(deepart.content_grad, input_shape, dx, grad_err_thres,
                  test_count, target_data)
    target_data_gram = np.random.normal(size=(30, 30))
    test_gradient(deepart.style_grad, input_shape, dx, grad_err_thres,
                  test_count, target_data_gram)

    target_data_list = gen_test_target_data(net, targets)
    test_gradient(deepart.objective_func, x0.shape, dx, grad_err_thres,
                  test_count, net, all_target_blob_names, targets,
                  target_data_list)
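
The helper test_gradient used by both variants above is not shown in this section. A minimal sketch of such a finite-difference checker follows, assuming only the calling convention visible above: each tested function takes an array of the given shape plus extra arguments and returns a (loss, gradient) pair. The name check_gradient and its internals are illustrative, not the project's actual implementation.

import numpy as np

def check_gradient(f, input_shape, dx, grad_err_thres, test_count, *args):
    # Random test point; f(x, *args) is assumed to return (loss, analytic_gradient).
    x = np.random.normal(size=input_shape).astype(np.float64)
    _, grad = f(x, *args)
    grad = np.reshape(np.asarray(grad, dtype=np.float64), input_shape)

    flat = x.ravel()  # view into x, so the edits below perturb x in place
    for idx in np.random.choice(flat.size, size=test_count, replace=False):
        orig = flat[idx]
        flat[idx] = orig + dx
        loss_plus, _ = f(x, *args)
        flat[idx] = orig - dx
        loss_minus, _ = f(x, *args)
        flat[idx] = orig  # restore before probing the next coordinate
        numeric = (loss_plus - loss_minus) / (2.0 * dx)
        err = abs(numeric - grad.ravel()[idx])
        assert err < grad_err_thres, 'gradient mismatch at index {}: {}'.format(idx, err)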
Example #3
def deepart_reconstruct(model='vgg',blob_names=['conv3_1','conv4_1','conv5_1'],
    blob_weights=[1,1,1],prefix='data',subsample=1,max_iter=2000,test_indices=None,
    data_indices=None,image_dims=(224,224),device_id=0,hybrid_names=[],
    hybrid_weights=[],tv_lambda=0.001,tv_beta=2,gaussian_init=False,dataset='lfw',
    desc=''):
  # model = vgg | vggface
  # blob_names = list of blobs to match (must be in the right order, front to back)
  # blob_weights = cost function weight for each blob
  # prefix = target features will be read from PREFIX_BLOB.h5
  # subsample = process every Nth entry of the dataset
  # max_iter = number of iters to optimize (2000+ for good quality)
  # test_indices = list of dataset indices (corresponds to each entry in h5 files)
  # data_indices = list of h5 indices (for computing subsets of the data)
  #   Example: data_indices=[0,3], test_indices=[4,2] means compute with the first
  #   and fourth features in the h5 file and compare against the fifth and third
  #   images in the dataset.
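  #
  # A hypothetical invocation (the feature files data_conv3_1.h5 etc. are assumed to
  # already exist; paths and indices here are placeholders, not from the source):
  #   deepart_reconstruct(model='vgg',blob_names=['conv3_1','conv4_1','conv5_1'],
  #     blob_weights=[1,1,1],prefix='data',data_indices=[0,3],test_indices=[4,2])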

  t0=time.time()

  # create network
  caffe,net,image_dims=setup_classifier(model=model,image_dims=image_dims,device_id=device_id)

  # init result dir
  root_dir='results_{}'.format(int(round(t0))) if desc=='' else 'results_{}_{}'.format(int(round(t0)),desc)
  if not os.path.exists(root_dir):
    os.makedirs(root_dir)
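  # shadow the builtin print so that everything logged below is written both to
  # {root_dir}/log.txt and to stdout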
  def print(*args):
    with open('{}/log.txt'.format(root_dir),'a') as f:
      f.write(' '.join(str(x) for x in args)+'\n')
    sys.stdout.write(' '.join(str(x) for x in args)+'\n')
  print('root_dir',root_dir)
  print('model',model)
  print('blob_names',blob_names)
  print('blob_weights',blob_weights)
  print('hybrid_names',hybrid_names)
  print('hybrid_weights',hybrid_weights)
  print('prefix',prefix)
  print('subsample',subsample)
  print('max_iter',max_iter)
  print('image_dims',image_dims)
  print('tv_lambda',tv_lambda)
  print('tv_beta',tv_beta)
  print('gaussian_init',gaussian_init)
  print('dataset',dataset)
  rlprint=ratelimit(interval=60)(print)

  # read features
  h5f={}
  for k in blob_names:
    assert os.path.exists('{}_{}.h5'.format(prefix,k))
    h5f[k]=h5py.File('{}_{}.h5'.format(prefix,k),'r')
    print('h5f',k,h5f[k]['DS'].shape,h5f[k]['DS'].dtype)
    N=h5f[k]['DS'].shape[0]
  #_,_,lfwattr=read_lfw_attributes()
  with open('dataset/{}.txt'.format(dataset)) as f:
    original_names=[x.strip() for x in f.readlines()]
  if data_indices is None:
    # assume you want to process everything
    data_indices=list(range(N))
  else:
    # require that you specify the data -> dataset mapping
    assert not test_indices is None
    assert len(data_indices)==len(test_indices)
  if test_indices is None:
    test_indices=list(range(N))

  for x in hybrid_names:
    assert x not in blob_names
  assert len(hybrid_names)==len(hybrid_weights)

  # processing
  psnr=[]
  ssim=[]
  work_units,work_done,work_t0=len(test_indices),0,time.time()
  basename_uid={}
  for j,i in enumerate(test_indices):
    if j % subsample: continue
    np.random.seed(123)

    #ipath='images/lfw/'+lfw_filename(lfwattr[i][0],lfwattr[i][1])
    ipath='images/'+original_names[i]
    #person=lfwattr[i][0]
    #seq=lfwattr[i][1]
    #basename=os.path.splitext(os.path.split(lfw_filename(person,seq))[1])[0]
    basename=os.path.splitext(os.path.split(ipath)[1])[0]
    if basename not in basename_uid:
      basename_uid[basename]=0
    else:
      basename_uid[basename]=basename_uid[basename]+1
    basename2='{}-{:02}'.format(basename,basename_uid[basename])

    # generate target list and target features
    all_target_blob_names=list(hybrid_names)+list(blob_names)
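    # each entry of targets is a tuple (source_image, blob_names, is_gram, weight);
    # source_image is unused here (None) and is_gram=False selects the content (L2)
    # loss rather than the Gram/style loss, mirroring how the objective unpacks the tuples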
    targets=[]
    target_data_list=[]
    if len(hybrid_weights)>0:
      F=net.extract_features([ipath],hybrid_names,auto_reshape=True)
      for k,v in zip(hybrid_names,hybrid_weights):
        if len(targets)>0 and targets[-1][3]==v:
          targets[-1][1].append(k)
          target_data_list[-1][k]=F[k]
        else:
          targets.append((None,[k],False,v))
          target_data_list.append({k: F[k]})
        print('hybrid',k,v,F[k].shape,F[k].dtype)
    for k,v in zip(blob_names,blob_weights):
      if len(targets)>0 and targets[-1][3]==v:
        targets[-1][1].append(k)
        target_data_list[-1][k]=h5f[k]['DS'][data_indices[j]]
      else:
        targets.append((None,[k],False,v))
        target_data_list.append({k: h5f[k]['DS'][data_indices[j]]})
      print('target',k,v,h5f[k]['DS'][data_indices[j]].shape,h5f[k]['DS'][data_indices[j]].dtype)
    #target_data_list = gen_target_data(root_dir, caffe, net, targets)

    # Set initial value and reshape net
    if gaussian_init:
      init_img=np.random.normal(loc=0.5,scale=0.1,size=image_dims+(3,))
    else:
      init_img=caffe.io.load_image(ipath)
    deepart.set_data(net,init_img)
    #x0=np.ravel(init_img).astype(np.float64)
    x0=net.get_input_blob().ravel().astype(np.float64)
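    # per-pixel box constraints; -128..162 presumably spans the mean-subtracted pixel range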
    bounds=zip(np.full_like(x0,-128),np.full_like(x0,162))
    solver_type='L-BFGS-B'
    solver_param={'maxiter': max_iter, 'iprint': -1}
    opt_res=scipy.optimize.minimize(deepart.objective_func,x0,args=(net,all_target_blob_names,targets,target_data_list,tv_lambda,tv_beta),bounds=bounds,method=solver_type,jac=True,options=solver_param)
    #print('opt_res',opt_res)
    #print('opt_res.x',opt_res.x.shape,opt_res.x.dtype)

    data=np.reshape(opt_res.x,net.get_input_blob().shape)[0]
    deproc_img=net.transformer.deprocess(net.inputs[0],data)
    A=caffe.io.load_image(ipath)
    B=np.clip(deproc_img,0,1)
    A=caffe.io.resize_image(A,B.shape)

    #print('A',A.shape,A.dtype,A.min(),A.max())
    #print('B',B.shape,B.dtype,B.min(),B.max())
    skimage.io.imsave('{}/{}-original.png'.format(root_dir,basename),A)
    skimage.io.imsave('{}/{}.png'.format(root_dir,basename2),B)
    #C=non_local_means('{}/{}.png'.format(root_dir,basename2),3,21,0.04,'{}/{}-nlm.png'.format(root_dir,basename2))
    caption='psnr {:.4}, ssim {:.4}'.format(measure.measure_PSNR(A,B,1).mean(),measure.measure_SSIM(A,B,1).mean())
    subprocess.check_call('convert {root_dir}/{basename}-original.png {root_dir}/{basename2}.png -size {w}x -font Arial-Italic -pointsize 12 caption:{caption} -append {root_dir}/eval_{basename2}.png'.format(root_dir=pipes.quote(root_dir),basename=pipes.quote(basename),basename2=pipes.quote(basename2),ipath=pipes.quote(ipath),caption=pipes.quote(caption),w=A.shape[1],h=A.shape[0]//10),shell=True)
    psnr.append(measure.measure_PSNR(A,B,1).mean())
    ssim.append(measure.measure_SSIM(A,B,1).mean())
    with open('{}/results.txt'.format(root_dir),'a') as f:
      f.write('"{}",{},{},{}\n'.format(basename2,i,psnr[-1],ssim[-1]))

    work_done=work_done+1*subsample
    rlprint('{}/{}, {} min remaining'.format(work_done,work_units,(work_units/work_done-1)*(time.time()-work_t0)/60.0))
  for k in h5f:
    h5f[k].close()

  print('psnr',psnr)
  print('ssim',ssim)
  psnr=np.asarray(psnr).mean()
  ssim=np.asarray(ssim).mean()
  with open('{}/results.txt'.format(root_dir),'a') as f:
    f.write(',{},{}\n'.format(psnr,ssim))

  t1=time.time()
  print('Finished in {} minutes.'.format((t1-t0)/60.0))
Example #4
def deepart_edit(model='vgg',blob_names=['conv3_1','conv4_1','conv5_1'],
    blob_weights=[1,1,1],prefix='data',subsample=1,max_iter=2000,test_indices=None,
    data_indices=None,image_dims=(224,224),device_id=0,hybrid_names=[],
    hybrid_weights=[],tv_lambda=0.001,tv_beta=2,gaussian_init=False,dataset='lfw',
    desc='edit'):
  t0=time.time()

  # create network
  caffe,net,image_dims=setup_classifier(model=model,image_dims=image_dims,device_id=device_id)

  # init result dir
  root_dir='results_{}'.format(int(round(t0))) if desc=='' else 'results_{}_{}'.format(int(round(t0)),desc)
  if not os.path.exists(root_dir):
    os.makedirs(root_dir)
  def print(*args):
    with open('{}/log.txt'.format(root_dir),'a') as f:
      f.write(' '.join(str(x) for x in args)+'\n')
    sys.stdout.write(' '.join(str(x) for x in args)+'\n')
  print('root_dir',root_dir)
  print('model',model)
  print('blob_names',blob_names)
  print('blob_weights',blob_weights)
  print('hybrid_names',hybrid_names)
  print('hybrid_weights',hybrid_weights)
  print('prefix',prefix)
  print('subsample',subsample)
  print('max_iter',max_iter)
  print('image_dims',image_dims)
  print('tv_lambda',tv_lambda)
  print('tv_beta',tv_beta)
  print('gaussian_init',gaussian_init)
  print('dataset',dataset)

  # image
  ipath='images/lfw/Winona_Ryder/Winona_Ryder_0024.jpg'
  init_img=caffe.io.load_image(ipath)
  print('init_img',init_img.shape,init_img.dtype)

  # generate target list and target features
  all_target_blob_names=list(hybrid_names)+list(blob_names)
  targets=[]
  target_data_list=[]
  if len(hybrid_weights)>0:
    F=net.extract_features([ipath],hybrid_names,auto_reshape=True)
    for k,v in zip(hybrid_names,hybrid_weights):
      if len(targets)>0 and targets[-1][3]==v:
        targets[-1][1].append(k)
        target_data_list[-1][k]=F[k]
      else:
        targets.append((None,[k],False,v))
        target_data_list.append({k: F[k]})
      print('hybrid',k,v,F[k].shape,F[k].dtype)
  F=net.extract_features([ipath],blob_names,auto_reshape=True)
  for k,v in zip(blob_names,blob_weights):
    if len(targets)>0 and targets[-1][3]==v:
      targets[-1][1].append(k)
      target_data_list[-1][k]=F[k]
    else:
      targets.append((None,[k],False,v))
      target_data_list.append({k: F[k]})
    print('target',k,v,F[k].shape,F[k].dtype)

  # image target = weighted L2 loss (1 x 3 x H x W)
  # gradient target = weighted L2 loss on finite diff (2 x 1 x K x H x W)
  # feature target = weighted L2 loss (1 x K x H x W)
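  # (sketch of the presumed convention: each such target contributes a weighted L2 term,
  #  loss += sum(weight*(gen-target)**2) with gradient 2*weight*(gen-target); the exact
  #  scaling lives in deepart.content_grad / deepart.gradient_grad)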
  gradient_space_targets=[]
  if False:
    deepart.set_data(net,init_img)
    gen_data=net.get_input_blob().astype(np.float64)
    gradient_target=np.zeros((2,)+gen_data.shape,dtype=np.float64)
    gradient_target[0,:,:,:-1,:]=np.diff(gen_data,axis=2)*3
    gradient_target[1,:,:,:,:-1]=np.diff(gen_data,axis=3)*3
    gradient_weight=np.ones(gen_data.shape)
    gradient_space_targets.append((gradient_target,gradient_weight))

  image_space_targets=[]
  if False:
    color_img=skimage.io.imread('eyes.png')/255.0
    deepart.set_data(net,color_img[:,:,:3])
    gen_data=net.get_input_blob().astype(np.float64)
    image_target=np.copy(gen_data)
    image_weight=(color_img[:,:,3])[np.newaxis,np.newaxis]
    assert image_target.shape==(1,3,250,250)
    assert image_weight.shape==(1,1,250,250)
    assert image_weight.max()<=1
    image_space_targets.append((image_target,image_weight))

  if True:
    F=net.extract_features([ipath],all_target_blob_names,auto_reshape=True)
    k='conv3_1'
    v=1
    #print(k,F[k].min(),F[k].max(),np.linalg.norm(F[k]))
    # amplify the conv3_1 activations (editing experiment)
    F[k]*=2
    targets.append((None,[k],False,v))
    target_data_list.append({k: np.copy(F[k])})
  

  # objective fn
  def objective_fn(x, net, all_target_blob_names, targets, target_data_list, tv_lambda, tv_beta):
    # adapted copy of deepart.objective_func, extended below with gradient-space and image-space targets
    # x = current solution image
    # returns loss, gradients
    deepart.get_data_blob(net).data[...]=np.reshape(x,deepart.get_data_blob(net).data.shape)
    deepart.get_data_blob(net).diff[...]=0
    net.forward()

    loss = 0
    # Go through target blobs in reversed order
    for i in range(len(all_target_blob_names)):
        blob_i = len(all_target_blob_names) - 1 - i
        start = all_target_blob_names[blob_i]

        if blob_i == 0:
            end = None
        else:
            end = all_target_blob_names[blob_i - 1]

        # Get target blob
        target_blob = net.blobs[start]
        if i == 0:
            target_blob.diff[...] = 0

        gen_data = target_blob.data.copy()
        print('gen_data',gen_data.shape,gen_data.dtype) # debug
        # Apply RELU
        pos_mask = gen_data > 0
        gen_data[~pos_mask] = 0

        # Go through all images and compute accumulated gradient for the current target blob
        target_blob_add_diff = np.zeros_like(target_blob.diff, dtype=np.float64)
        for target_i, (_, target_blob_names, is_gram, weight) in enumerate(targets):
            # Skip if the current blob is not among the target's blobs
            if start not in target_blob_names:
                continue

            target_data = target_data_list[target_i][start]
            if is_gram:
                c_loss, c_grad = deepart.style_grad(gen_data, target_data)
            else:
                c_loss, c_grad = deepart.content_grad(gen_data, target_data)

            # Apply RELU
            c_grad[~pos_mask] = 0
            target_blob_add_diff += c_grad * weight / len(target_blob_names)
            loss += c_loss * weight / len(target_blob_names)

        target_blob.diff[...] += target_blob_add_diff
        net.backward(start=start, end=end)

    print('loss',loss)
    grad = np.ravel(deepart.get_data_blob(net).diff).astype(np.float64)

    # extra (experimental) targets expressed directly in gradient space / image space
    gen_data = x.reshape(deepart.get_data_blob(net).data.shape)
    for (gradient_target, gradient_weight) in gradient_space_targets:
      loss_g, grad_g = deepart.gradient_grad(gen_data, gradient_target, gradient_weight)
      grad_g = np.ravel(grad_g).astype(np.float64)
      loss += loss_g
      grad += grad_g
      print('loss_g',loss_g)

    for (image_target, image_weight) in image_space_targets:
      loss_i, grad_i = deepart.content_grad(gen_data, image_target, weight=image_weight)
      grad_i = np.ravel(grad_i).astype(np.float64)
      loss += loss_i
      grad += grad_i
      print('loss_i',loss_i)

    if tv_lambda > 0:
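        # total-variation regularizer, roughly sum over pixels of
        # (|dI/dx|^2 + |dI/dy|^2)^(beta/2), scaled by tv_lambda (see totalvariation.tv_norm)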
        tv_loss, tv_grad = totalvariation.tv_norm(x.reshape(deepart.get_data_blob(net).data.shape),beta=tv_beta)
        print('loss_tv',tv_loss*tv_lambda)
        return loss + tv_loss*tv_lambda, grad + np.ravel(tv_grad)*tv_lambda
    else:
        return loss, grad

  deepart.set_data(net,init_img)
  x0=net.get_input_blob().ravel().astype(np.float64)
  bounds=zip(np.full_like(x0,-128),np.full_like(x0,162))
  solver_type='L-BFGS-B'
  solver_param={'maxiter': max_iter, 'iprint': -1}
  opt_res=scipy.optimize.minimize(objective_fn,x0,args=(net,all_target_blob_names,targets,target_data_list,tv_lambda,tv_beta),bounds=bounds,method=solver_type,jac=True,options=solver_param)
  print(opt_res)

  data=np.reshape(opt_res.x,net.get_input_blob().shape)[0]
  deproc_img=net.transformer.deprocess(net.inputs[0],data)
  B=np.clip(deproc_img,0,1)
  A=init_img

  print('A',A.shape,A.dtype,A.min(),A.max())
  print('B',B.shape,B.dtype,B.min(),B.max())
  skimage.io.imsave('{}/input.png'.format(root_dir),A)
  skimage.io.imsave('{}/output.png'.format(root_dir),B)

  t1=time.time()
  print('Finished in {} minutes.'.format((t1-t0)/60.0))
Example #5
def deepart_identity(image_dims=None,max_iter=3000,hybrid_names=[],hybrid_weights=[],
    tv_lambda=0.001,tv_beta=2,desc='identity',device_id=0,dataset='lfw_random',
    count=20,layers=None):
  # Experimenting with making deepart produce the identity function
  t0=time.time()

  # init result dir
  root_dir='results_{}'.format(int(round(t0))) if desc=='' else 'results_{}_{}'.format(int(round(t0)),desc)
  if not os.path.exists(root_dir):
    os.makedirs(root_dir)
  def print(*args):
    with open('{}/log.txt'.format(root_dir),'a') as f:
      f.write(' '.join(str(x) for x in args)+'\n')
    sys.stdout.write(' '.join(str(x) for x in args)+'\n')

  print('image_dims',image_dims)
  print('max_iter',max_iter)
  print('hybrid_names',hybrid_names)
  print('hybrid_weights',hybrid_weights)
  print('tv_lambda',tv_lambda)
  print('tv_beta',tv_beta)
  print('desc',desc)
  print('device_id',device_id)
  print('dataset',dataset)
  print('count',count)
  print('layers',layers)

  if isinstance(dataset,list) or isinstance(dataset,tuple):
    ipathset=list(dataset)
  else:
    with open('dataset/{}.txt'.format(dataset)) as f:
      ipathset=['images/'+x.strip() for x in f.readlines()]
    ipathset=ipathset[:count]

  if layers is None:
    targetset=[
      ('c5',['conv5_1'],[1]),
      ('c4',['conv4_1'],[1]),
      ('c3',['conv3_1'],[1]),
      ('c2',['conv2_1'],[1]),
    ]
  else:
    targetset=[]
    if 'c2' in layers: targetset.append(('c2',['conv2_1'],[1]))
    if 'c3' in layers: targetset.append(('c3',['conv3_1'],[1]))
    if 'c4' in layers: targetset.append(('c4',['conv4_1'],[1]))
    if 'c5' in layers: targetset.append(('c5',['conv5_1'],[1]))

  #modelset=['vggface','vgg']
  modelset=['vgg']

  for model in modelset:

    caffe,net,image_dims=setup_classifier(model=model,image_dims=image_dims,device_id=device_id)

    for tname,blob_names,blob_weights in targetset:

      psnr=[]
      ssim=[]
  
      for ipath1 in ipathset:
  
        np.random.seed(123)
    
        basename=os.path.splitext(os.path.split(ipath1)[1])[0]
        root_dir2='{}/{}/{}'.format(root_dir,model,tname)
        if not os.path.exists(root_dir2):
          os.makedirs(root_dir2)
    
        all_target_blob_names=list(hybrid_names)+list(blob_names)
        targets=[]
        target_data_list=[]
        F=net.extract_features([ipath1],all_target_blob_names,auto_reshape=True)
        for k,v in zip(hybrid_names,hybrid_weights):
          if len(targets)>0 and targets[-1][3]==v:
            targets[-1][1].append(k)
            target_data_list[-1][k]=F[k]
          else:
            targets.append((None,[k],False,v))
            target_data_list.append({k: F[k]})
          print('hybrid',k,v,F[k].shape,F[k].dtype)
        for k,v in zip(blob_names,blob_weights):
          if len(targets)>0 and targets[-1][3]==v:
            targets[-1][1].append(k)
            target_data_list[-1][k]=F[k]
          else:
            targets.append((None,[k],False,v))
            target_data_list.append({k: F[k]})
          print('blob',k,v,F[k].shape,F[k].dtype)
    
        # load ground truth
        A=caffe.io.load_image(ipath1) # ground truth
        B=net.preprocess_inputs([A],auto_reshape=True)
        C=net.transformer.deprocess(net.inputs[0],B)
        D=caffe.io.resize_image(C,A.shape) # best possible result (only preprocess / deprocess)
        print('input',A.shape,A.dtype,A.min(),A.max())
        print('pre',B.shape,B.dtype,B.min(),B.max())
        print('de',C.shape,C.dtype,C.min(),C.max())
        print('re',D.shape,D.dtype,D.min(),D.max())
    
        # optimize
        # Set initial value and reshape net
        init_img=np.random.normal(loc=0.5,scale=0.1,size=A.shape)
        deepart.set_data(net,init_img)
        #x0=np.ravel(init_img).astype(np.float64)
        x0=net.get_input_blob().ravel().astype(np.float64)
        bounds=zip(np.full_like(x0,-128),np.full_like(x0,162))
        solver_type='L-BFGS-B'
        solver_param={'maxiter': max_iter, 'iprint': -1}
        opt_res=scipy.optimize.minimize(deepart.objective_func,x0,args=(net,all_target_blob_names,targets,target_data_list,tv_lambda,tv_beta),bounds=bounds,method=solver_type,jac=True,options=solver_param)
        data=np.reshape(opt_res.x,net.get_input_blob().shape)[0]
        deproc_img=net.transformer.deprocess(net.inputs[0],data)
        Dhat=caffe.io.resize_image(np.clip(deproc_img,0,1),A.shape)

        # evaluate
        print('{} best psnr = {:.4}, ssim = {:.4}'.format(basename,measure.measure_PSNR(A,D,1).mean(),measure.measure_SSIM(A,D,1).mean()))
        print('{} actual psnr = {:.4}, ssim = {:.4}'.format(basename,measure.measure_PSNR(A,Dhat,1).mean(),measure.measure_SSIM(A,Dhat,1).mean()))
        skimage.io.imsave('{}/{}_original.png'.format(root_dir2,basename),A)
        skimage.io.imsave('{}/{}_best.png'.format(root_dir2,basename),D)
        skimage.io.imsave('{}/{}_actual.png'.format(root_dir2,basename),Dhat)
        caption='psnr {:.4}, ssim {:.4}'.format(measure.measure_PSNR(A,Dhat,1).mean(),measure.measure_SSIM(A,Dhat,1).mean())
        subprocess.check_call('convert {root_dir2}/{basename}_original.png {root_dir2}/{basename}_actual.png -size {w}x -font Arial-Italic -pointsize 12 caption:{caption} -append {root_dir2}/eval_{basename}.png'.format(root_dir2=pipes.quote(root_dir2),basename=pipes.quote(basename),caption=pipes.quote(caption),w=A.shape[1],h=A.shape[0]//10),shell=True)
        psnr.append(measure.measure_PSNR(A,Dhat,1).mean())
        ssim.append(measure.measure_SSIM(A,Dhat,1).mean())
  
      print('psnr',psnr)
      print('ssim',ssim)
      psnr=np.asarray(psnr).mean()
      ssim=np.asarray(ssim).mean()
      with open('{}/autoencoder.txt'.format(root_dir),'a') as f:
        f.write('{},{},{},{}\n'.format(model,tname,psnr,ssim))

  t1=time.time()
  print('Finished in {} minutes.'.format((t1-t0)/60.0))

def invert_model(filelist,targetlist,model,image_dims=None,max_iter=3000,
    hybrid_names=[],hybrid_weights=[],tv_lambda=0.001,tv_beta=2,desc='identity',
    device_id=0):
  # filelist is a list of images
  # targetlist is a list of targets
  # each target is a (tname,blob_names,blob_weights)
  # tname is a descriptive string
  # blob_names is a list of blobs, must be in forward-to-back order
  # blob_weights is a list of scalar weights, one for each blob
  #
  # Example: targetlist=[('c5',['conv5_1'],[1])]
  # This example will try to reconstruct an image by finding the image which
  # matches the conv5_1 features of that image.
  #
  # The script will test reconstruction for each target for each image.
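  #
  # A hypothetical invocation (the image path is just an example):
  #   invert_model(['images/lfw/Winona_Ryder/Winona_Ryder_0024.jpg'],
  #                [('c5',['conv5_1'],[1]),('c3',['conv3_1'],[1])],'vgg')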
  t0=time.time()

  # init result dir
  root_dir='results_{}'.format(int(round(t0))) if desc=='' else 'results_{}_{}'.format(int(round(t0)),desc)
  if not os.path.exists(root_dir):
    os.makedirs(root_dir)
  def print(*args):
    with open('{}/log.txt'.format(root_dir),'a') as f:
      f.write(' '.join(str(x) for x in args)+'\n')
    sys.stdout.write(' '.join(str(x) for x in args)+'\n')

  print('filelist',filelist)
  print('targetlist',targetlist)
  print('model',model)
  print('image_dims',image_dims)
  print('max_iter',max_iter)
  print('tv_lambda',tv_lambda)
  print('tv_beta',tv_beta)
  print('desc',desc)
  print('device_id',device_id)

  caffe,net,image_dims=setup_classifier(model=model,image_dims=image_dims,device_id=device_id)

  all_results=[]

  for tname,blob_names,blob_weights in targetlist:

    all_psnr=[]
    all_ssim=[]
  
    for ipath1 in filelist:
  
      np.random.seed(123)
  
      basename=os.path.splitext(os.path.split(ipath1)[1])[0]
      root_dir2='{}/{}/{}'.format(root_dir,model,tname)
      if not os.path.exists(root_dir2):
        os.makedirs(root_dir2)
  
      all_target_blob_names=list(hybrid_names)+list(blob_names)
      targets=[]
      target_data_list=[]
      F=net.extract_features([ipath1],all_target_blob_names,auto_reshape=True)
      for k,v in zip(hybrid_names,hybrid_weights):
        if len(targets)>0 and targets[-1][3]==v:
          targets[-1][1].append(k)
          target_data_list[-1][k]=F[k]
        else:
          targets.append((None,[k],False,v))
          target_data_list.append({k: F[k]})
        print('hybrid',k,v,F[k].shape,F[k].dtype)
      for k,v in zip(blob_names,blob_weights):
        if len(targets)>0 and targets[-1][3]==v:
          targets[-1][1].append(k)
          target_data_list[-1][k]=F[k]
        else:
          targets.append((None,[k],False,v))
          target_data_list.append({k: F[k]})
        print('blob',k,v,F[k].shape,F[k].dtype)
  
      # load ground truth
      A=caffe.io.load_image(ipath1) # ground truth
      B=net.preprocess_inputs([A],auto_reshape=True)
      C=net.transformer.deprocess(net.inputs[0],B)
      D=caffe.io.resize_image(C,A.shape) # best possible result (only preprocess / deprocess)
      print('input',A.shape,A.dtype,A.min(),A.max())
      print('pre',B.shape,B.dtype,B.min(),B.max())
      print('de',C.shape,C.dtype,C.min(),C.max())
      print('re',D.shape,D.dtype,D.min(),D.max())
  
      # optimize
      # Set initial value and reshape net
      init_img=np.random.normal(loc=0.5,scale=0.1,size=A.shape)
      deepart.set_data(net,init_img)
      #x0=np.ravel(init_img).astype(np.float64)
      x0=net.get_input_blob().ravel().astype(np.float64)
      bounds=zip(np.full_like(x0,-128),np.full_like(x0,162))
      solver_type='L-BFGS-B'
      solver_param={'maxiter': max_iter, 'iprint': -1}
      opt_res=scipy.optimize.minimize(deepart.objective_func,x0,args=(net,all_target_blob_names,targets,target_data_list,tv_lambda,tv_beta),bounds=bounds,method=solver_type,jac=True,options=solver_param)
      data=np.reshape(opt_res.x,net.get_input_blob().shape)[0]
      deproc_img=net.transformer.deprocess(net.inputs[0],data)
      Dhat=caffe.io.resize_image(np.clip(deproc_img,0,1),A.shape)
      all_results.append(Dhat)

      # evaluate
      print('{} {} best psnr = {:.4}, ssim = {:.4}'.format(tname,basename,measure.measure_PSNR(A,D,1).mean(),measure.measure_SSIM(A,D,1).mean()))
      psnr=measure.measure_PSNR(A,Dhat,1).mean()
      ssim=measure.measure_SSIM(A,Dhat,1).mean()
      print('{} {} actual psnr = {:.4}, ssim = {:.4}'.format(tname,basename,psnr,ssim))
      skimage.io.imsave('{}/{}_original.png'.format(root_dir2,basename),A)
      skimage.io.imsave('{}/{}_best.png'.format(root_dir2,basename),D)
      skimage.io.imsave('{}/{}_actual.png'.format(root_dir2,basename),Dhat)
      caption='psnr {:.4}, ssim {:.4}'.format(psnr,ssim)
      subprocess.check_call('convert {root_dir2}/{basename}_original.png {root_dir2}/{basename}_actual.png -size {w}x -font Arial-Italic -pointsize 12 caption:{caption} -append {root_dir2}/eval_{basename}.png'.format(root_dir2=pipes.quote(root_dir2),basename=pipes.quote(basename),caption=pipes.quote(caption),w=A.shape[1],h=A.shape[0]//10),shell=True)
      all_psnr.append(psnr)
      all_ssim.append(ssim)
  
    print(tname,'psnr',all_psnr)
    print(tname,'ssim',all_ssim)
    mean_psnr=np.asarray(all_psnr).mean()
    mean_ssim=np.asarray(all_ssim).mean()
    with open('{}/autoencoder.txt'.format(root_dir),'a') as f:
      f.write('{},{},{},{}\n'.format(model,tname,mean_psnr,mean_ssim))

  t1=time.time()
  print('Finished in {} minutes.'.format((t1-t0)/60.0))
  return root_dir,np.asarray(all_results)