Пример #1
0
def makeWarpedResidual(ds,do,ra=None,rt=None,rc=None,
                       dw=None,v=None,adjoint=False):
  """Warps simulated data ds to observed data do and fills requested residuals.

  Outputs are written in place into whichever of ra (amplitude residual),
  rt (traveltime residual) and rc (combined residual) are provided.
  dw receives the warped data and v the estimated shifts; both are
  allocated with like(ds) when not supplied.
  """
  reverseOrder = False
  dw = like(ds) if dw is None else dw
  v = like(ds) if v is None else v
  if not reverseOrder:
    # rt is needed as scratch space to build rc even if the caller
    # did not ask for it.
    if rt is None and rc is not None:
      rt = like(ds)
    warp(ds,do,dw,v,rt,adjoint=adjoint) # right order
    if ra is not None:
      sub(ds,dw,ra)
    if rc is not None:
      sub(ds,dw,rc)
      add(rt,rc,rc)
  else:
    warp(do,ds,dw,v,adjoint=adjoint) # wrong order
    mul(-1.0,v,v) # shifts flip sign when the warp direction is reversed
    if ra is not None:
      sub(dw,do,ra)
    if rt is not None:
      mul(v,timeDerivative(ds),rt)
    if rc is not None:
      sub(dw,do,rc)
      add(mul(v,timeDerivative(ds)),rc,rc)
Пример #2
0
def makeWarpedResidual(ds,
                       do,
                       ra=None,
                       rt=None,
                       rc=None,
                       dw=None,
                       v=None,
                       adjoint=False):
    """Warp simulated data ds toward observed data do and fill residuals.

    Results are written in place into whichever output arrays are given:
    ra -- amplitude residual, rt -- traveltime residual, rc -- combined
    residual (amplitude + traveltime). dw receives the warped data and v
    the estimated shifts; both default to arrays shaped like ds.
    NOTE(review): warp/sub/add/mul/like/timeDerivative are module-level
    helpers not visible here -- presumably array in-place ops; confirm.
    """
    reverseOrder = False
    if dw is None:
        dw = like(ds)
    if v is None:
        v = like(ds)
    if reverseOrder:
        warp(do, ds, dw, v, adjoint=adjoint)  # wrong order
        # shifts flip sign when the warp direction is reversed
        mul(-1.0, v, v)
        if ra is not None:
            sub(dw, do, ra)
        if rt is not None:
            mul(v, timeDerivative(ds), rt)
        if rc is not None:
            sub(dw, do, rc)
            add(mul(v, timeDerivative(ds)), rc, rc)
    else:
        # rt is needed as scratch to build rc even when not requested
        if (rt is None) and (rc is not None):
            rt = like(ds)
        warp(ds, do, dw, v, rt, adjoint=adjoint)  # right order
        if ra is not None:
            sub(ds, dw, ra)
        if rc is not None:
            sub(ds, dw, rc)
            add(rt, rc, rc)
Пример #3
0
def warpTT():
  """Demo: dynamically warps traveltime image gtt to ftt, trace by trace.

  Reads 1D and 2D data from text files, finds shifts per time slice on
  envelope (abs) traces, and plots inputs and warped results.
  NOTE(review): getData1/getData2/plot*/zerofloat/abs helpers and the
  DynamicWarping class come from elsewhere (Mines JTK / Jython).
  """
  f = getData1("Book1.txt")
  g,s = warp(f)
  plot1(f,"f")
  plot1(g,"g")
  plot1(s,"s")
  ftt = getData2("tt.txt")
  gtt = getData2("tt2.txt")
  plot2(ftt,title="ftt")
  plot2(gtt,title="gtt")
  plotPointSlices(ftt,g=gtt,title="tt",i2Label="tau")
  # shift search range is [-25,25] samples
  dw = DynamicWarping(-25,25)
  dw.setErrorExtrapolation(ErrorExtrapolation.REFLECT)
  dw.setStrainMax(0.5)
  dw.setShiftSmoothing(2.0)
  gttw = zerofloat(len(gtt[0]),len(gtt))
  ftta = zerofloat(len(gtt[0]),len(gtt))
  for it in range(len(ftt)):
    # warp amplitude envelopes, not signed traces
    fa = abs(ftt[it])
    ga = abs(gtt[it])
    u = dw.findShifts(fa,ga)
    #gttw[it] = dw.applyShifts(u,gtt[it])
    gttw[it] = dw.applyShifts(u,ga)
    ftta[it] = fa
  plotPointSlices(ftta,g=gttw,title="tt warped",i2Label="tau")
  plot2(gttw,title="gtt warped")
Пример #4
0
def link_nonlocal(imnum1, warp, imnum2, shape, warpshape, links):
    print "Cross links", imnum, imnum2
    num_i_steps = int(shape[0] / step) + 1
    num_j_steps = int(shape[1] / step) + 1
    i_vals = np.linspace(0, 1.0, num_i_steps)
    j_vals = np.linspace(0, 1.0, num_j_steps)
    delta_i = i_vals[1]
    delta_j = j_vals[1]
    for i in range(num_i_steps):
        for j in range(num_j_steps):
            dest_j, dest_i = warp(i_vals[i], j_vals[j])
            if i == j:
                print i_vals[i] * (shape[0] - 1), j_vals[j] * (shape[1] - 1), "warped to", \
                dest_i * (shape[0] - 1), dest_j * (shape[1] - 1)
            if (dest_i > 0) and (dest_j > 0) and \
                    (dest_i < 1.0) and (dest_j < 1.0):
                base_i = np.searchsorted(i_vals, dest_i) - 1
                base_j = np.searchsorted(j_vals, dest_j) - 1
                d_i = (dest_i - i_vals[base_i]) / delta_i
                d_j = (dest_j - j_vals[base_j]) / delta_j
                assert d_i >= 0 and d_i < 1.0
                assert d_j >= 0 and d_j < 1.0
                links[imnum1, i, j, imnum2, base_i, base_j] = (k_link, hypot(d_i, d_j))
                links[imnum1, i, j, imnum2, base_i + 1, base_j] = (k_link, hypot((1.0 - d_i), d_j))
                links[imnum1, i, j, imnum2, base_i, base_j + 1] = (k_link, hypot(d_i, 1.0 - d_j))
                links[imnum1, i, j, imnum2, base_i + 1, base_j + 1] = (k_link, hypot(1.0 - d_i, 1.0 - d_j))
Пример #5
0
def makeG():
  """Demo: warps sequence f from Book1.txt, plots f, g, and shifts s,
  then writes the warped sequence g to Book2.txt.

  NOTE(review): getData1/warp/plot1/writeData are module-level helpers
  defined elsewhere in this script.
  """
  f = getData1("Book1.txt")
  g,s = warp(f)
  plot1(f,"f")
  plot1(g,"g")
  plot1(s,"s")
  writeData(g,"Book2.txt")
Пример #6
0
def get_predicted_image(model,X,warp):
    ''' Compute a predicted radar image by warping the last input frame.

    model: trained network mapping an input sequence to a warp field
    X: input numpy tensor (sequence of frames; last frame is warped)
    warp: initialized warp class object (callable)
    Returns:
    img: numpy array of shape (1, 64, 64)
    Raises:
    RuntimeError: if CUDA is not available.
    '''
    # BUG FIX: the original tested the bound method torch.cuda.is_available
    # without calling it (always truthy), and on the else path fell through
    # to an UnboundLocalError on `predicted`; now we call it and raise.
    if torch.cuda.is_available():
        output = model(Variable(torch.from_numpy(X)).cuda().unsqueeze(0))
        X = Variable(torch.from_numpy(X).cuda().unsqueeze(0))
        # warp the most recent frame with the predicted flow field
        img = warp(X[:, -1].unsqueeze(1), output)
        predicted = img.data.cpu().numpy().reshape(64,64)
    else:
        raise RuntimeError('Error: CUDA is not available')
    return np.expand_dims(predicted, axis=0)
Пример #7
0
def warpFG():
  """Demo: computes and plots dynamic-warping alignment errors for f and g.

  Accumulates errors forward, backtracks shifts u in reverse, and plots the
  transposed error volume with the estimated and true shifts overlaid.
  NOTE(review): umin/umax are module globals defining the shift search
  range -- not visible in this chunk; confirm their values.
  """
  f = getData1("Book1.txt")
  g,s = warp(f)
  plot1(f,"f")
  plot1(g,"g")
  plot1(s,"s")
  dw = DynamicWarping(umin,umax)
  dw.setStrainMax(0.5)
  dw.setErrorExtrapolation(ErrorExtrapolation.REFLECT)
  e = dw.computeErrors(f,g)
  d = dw.accumulateForward(e)
  u = dw.backtrackReverse(d,e)
  # transpose so lag is the vertical axis for plotting
  et = Transpose.transpose12(e)
  su = Sampling(umax-umin+1,1.0,umin)
  plot2(et,s2=su,title="alignment errors")
  plot2(et,p1=u,p2=s,s2=su,title="alignment errors")
Пример #8
0
def embed_nonlocal(imnum1, warp, imnum2, shape, weights):
    num_i_steps = int(shape[0] / step) + 1
    num_j_steps = int(shape[1] / step) + 1
    i_vals = np.linspace(0, shape[0] - 1, num_i_steps)
    j_vals = np.linspace(0, shape[1] - 1, num_j_steps)
    delta_i = i_vals[1]
    delta_j = j_vals[1]
    for i in range(num_i_steps):
        for j in range(num_j_steps):
            dest_j, dest_i = warp(i_vals[i], j_vals[j])
            print i_vals[i], j_vals[j], "warpted to", dest_i, dest_j
            if (dest_i > 0) and (dest_j > 0) and \
                    (dest_i < shape[0] - 1) and (dest_j < shape[1] - 1):
                base_i = np.searchsorted(i_vals, dest_i) - 1
                base_j = np.searchsorted(j_vals, dest_j) - 1
                w_i = 1.0 - (dest_i - i_vals[base_i]) / delta_i
                w_j = 1.0 - (dest_j - j_vals[base_j]) / delta_j
                weights[imnum1, i, j, imnum2, base_i, base_j] = intra_weight * w_i * w_j
                weights[imnum1, i, j, imnum2, base_i, base_j + 1] = intra_weight * w_i * (1.0 - w_j)
                weights[imnum1, i, j, imnum2, base_i + 1, base_j] = intra_weight * (1.0 - w_i) * w_j
                weights[imnum1, i, j, imnum2, base_i + 1, base_j + 1] = intra_weight * (1.0 - w_i) * (1.0 - w_j)
Пример #9
0
def warpTTImages():
  """Demo: 2D dynamic warping of traveltime image gtt to ftt.

  Computes the alignment-error volume, finds 2D shifts u, applies them to
  gtt, and plots errors, shifts, and the warped image.
  NOTE(review): getData1/getData2/plot*/Transpose and DynamicWarping come
  from elsewhere (Mines JTK / Jython).
  """
  f = getData1("Book1.txt")
  g,s = warp(f)
  plot1(f,"f")
  plot1(g,"g")
  plot1(s,"s")
  ftt = getData2("tt.txt")
  gtt = getData2("tt2.txt")
  plot2(ftt,title="ftt")
  plot2(gtt,title="gtt")
  # shift search range is [-25,25] samples
  dw = DynamicWarping(-25,25)
  dw.setStrainMax(0.2,0.2)
  dw.setErrorSmoothing(2)
  dw.setShiftSmoothing(2.0,2.0)
  e = dw.computeErrors(ftt,gtt)
  et = Transpose.transpose312(e)
  # BUG FIX: u was referenced by plot3 before it was computed (NameError);
  # compute the shifts first, then plot.
  u = dw.findShifts(ftt,gtt)
  plot3(et,p=u)
  #d = dw.accumulateForward(u)
  #uu = dw.backtrackReverse(d,u)
  h = dw.applyShifts(u,gtt)
  plot2(u,title="u")
  plot2(h,title="gtt warped")
Пример #10
0
 def compute(self,im):
   """Reads shot gather im from disk and warps it against do,
   storing the estimated shifts in v[im].

   NOTE(review): nt, nr, do, v, datDir, zerofloat, read and warp are
   module globals/helpers not visible in this chunk.
   """
   ds = zerofloat(nt,nr)
   read(datDir+'ds_'+str(im)+'.dat',ds)
   warp(do,ds,v=v[im]) # wrong order gives v as a function of do time
Пример #11
0
def train(epoch):
    """Runs one training epoch over train_loader, then one validation pass.

    Appends the mean training loss to `losses`, the mean validation loss to
    `total_val_loss`, and the last batch's warp field / predicted images to
    `warp_maps` / `radar_maps`.
    Relies on module-level globals: model, optimizer, criterion, warp, args,
    train_loader, val_loader, losses, total_val_loss, warp_maps, radar_maps.

    :param epoch: epoch index (not used inside the body)
    """
    #losses_by_batch=[]
    cum_loss = 0
    model.train()
    # losses_by_batch = []
    for batch_idx, (data, target) in enumerate(train_loader):
        # Convert to cuda tensors if cuda flag is true
        # NOTE(review): torch.cuda.is_available is not called here -- the bare
        # method object is always truthy, so .cuda() always runs; likely a bug.
        if torch.cuda.is_available:
            data, target = data.cuda(), target.cuda()

        data, targets = Variable(data), Variable(target)
        optimizer.zero_grad()
        # Autoregression loop: feed predictions back as inputs for
        # sequence_lenght steps and average the per-step losses
        if(args.train_type == 'autoreg'):
            sequence_loss = 0
            # NOTE(review): 'sequence_lenght' matches the (misspelled) argparse
            # attribute -- confirm before renaming anywhere.
            for step in range(args.sequence_lenght):
                w = model(data)
                imgs = warp(data[:, -1].unsqueeze(1), w)
                # slide the window: drop oldest frame, append the prediction
                data = torch.cat([data[:, 1:], imgs], 1)
                current_loss = criterion(imgs, targets)
                sequence_loss += current_loss
            loss = sequence_loss / args.sequence_lenght
        else:
            # Input tensor to get the warp
            w = model(data)
            #print('w', np.shape(w))
            # Get transformed image using the warp schema
            imgs = warp(data[:, -1].unsqueeze(1), w)
            # Compute MSE loss
            loss = criterion(imgs, targets)
        # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom; on modern
        # versions this should be loss.item().
        cum_loss += loss.data[0]

        #print('Batch : {}  |  loss : {}   |   cum_loss : {}'.format(batch_idx, loss.data[0], cum_loss))

        # Set gradients to zero and backpropagate

        loss.backward()
        optimizer.step()
        # Printing
        if batch_idx % 50 == 0:
            #losses_by_batch.append(loss.data[0])
            # Plot random warping map on wisdom
            r = np.random.randint(0, len(data))
            w = w[r].data.cpu().numpy()
            #flow_clr = flow_to_im(w)
            #viz.image(flow_clr, win=str(2 * len(imgs) + 1),
            #          opts=dict(title='output w {}'.format(1)))

            # Plot real radar image and its prediction on visdom
            #img = imgs[r, 0].data.cpu()
            #viz.heatmap(img, win=str(
            #             + 1), opts=dict(title='output image {}'.format(1)))

            #img_target = targets[r, 0].data.cpu()
            #viz.heatmap(img_target, win=str(
            #            len(imgs) + 1), opts=dict(title='target image {}'.format(1)))

    #print('Average train epoch rmse =',cum_loss)
    losses.append(cum_loss/((batch_idx+1)))
    # Saving maps and images (one per epoch)
    warp_maps.append(w)
    radar_maps.append(imgs)

    # Compute validation loss
    epoch_vloss = 0
    for batch_idx, (data, target) in enumerate(val_loader):
        # convert to cuda tensors if cuda flag is true
        # NOTE(review): same uncalled is_available as above
        if torch.cuda.is_available:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # Compute validation error of each epoch
        # Input tensor to get the warp
        w = model(data)
        # Get transformed image using the warp schema
        img = warp(data[:, -1].unsqueeze(1), w)
        # Compute MSE loss
        val_loss = criterion(img, target)
        epoch_vloss += val_loss.data[0]

    total_val_loss.append(epoch_vloss/((batch_idx+1)))
Пример #12
0
 def compute(self, im):
     """Reads shot gather im from disk and warps it against do,
     storing the estimated shifts in v[im]."""
     path = datDir + 'ds_' + str(im) + '.dat'
     ds = zerofloat(nt, nr)
     read(path, ds)
     # wrong order gives v as a function of do time
     warp(do, ds, v=v[im])
Пример #13
0
 def test_mosaic(self):
     """Warps images dji_0644.jpg and dji_0645.jpg, then mosaics them."""
     names = ['dji_0%d.jpg' % i for i in xrange(644, 646)]
     warped = [warp(name) for name in names]
     mosaic(warped)
     pass
Пример #14
0
 def test_warp(self):
     """Smoke test: warps a single image; passes if warp() does not raise.

     NOTE(review): the result is unused -- no assertion is made on it.
     """
     warped = warp('dji_0649.jpg')
     pass