Example #1
def main(csv=True, coords=None, gps=False):  # pylint: disable=unused-argument
  if coords is None:
    coords = np.array([[-1.353887, 50.965639],
                       [-1.386731, 50.935744],
                       [-1.401907, 50.925657],
                       [-1.368191, 50.914509],
                       [-1.354101, 50.90988],
                       [-1.34853, 50.902636],
                       [-0.964885, 50.81373]])
  n = coords.shape[0]

  print('%s: Loading nnet...' % time.ctime())
  # For the validation and test data, we'll just hold the entire dataset in
  # one constant node.
  #test_data_node = tf.constant(data)
  test_data_node = tf.placeholder(
      tf.float32, shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  test_data_node2 = tf.placeholder(
      tf.float32, shape=(n % BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  test_centre_node = tf.placeholder(
      tf.float32, shape=(BATCH_SIZE, SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS))
  test_centre_node2 = tf.placeholder(
      tf.float32, shape=(n % BATCH_SIZE, SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS))

  # The tensors below hold the model weights. In this example they are fixed
  # constants rather than trainable variables, so they need no initial value
  # assignment from {tf.initialize_all_variables().run()}.
  depth1 = SINGLE_SIZE * SINGLE_SIZE * NUM_CHANNELS
  conv1_weights = tf.constant(  # shape: [SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS, depth1]
      np.reshape(np.eye(depth1, dtype='float32'),
                 [SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS, depth1]))
  #centre_weights =tf.constant( # shape: [IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS, depth1]
  #    get_centre(depth1))
  #conv1_biases = tf.constant(np.zeros([depth1], dtype='float32'))
  # conv2_weights = tf.Variable(tf.concat(3,[tf.ones([1,1,depth1,1])*2*(.5-i%2) for i in range(2*depth1)]))
  conv2_weights = tf.concat(3,[tf.ones([1,1,depth1,1]),-tf.ones([1,1,depth1,1])])
  kernel_weights = tf.ones([SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS, 1])  # gaussian or linear kernel to use instead of avg_pool

  def model(data, centre_data, train=False):
    """The Model definition."""
    # 1. extract features from centre and every (other) patch in the same way (first attempt: identity)
    conv1 = tf.nn.conv2d(data, conv1_weights, strides=[1, PIXELperLABEL, PIXELperLABEL, 1], padding='VALID')
    centre = tf.nn.conv2d(centre_data, conv1_weights, strides=[1, PIXELperLABEL, PIXELperLABEL, 1], padding='VALID')
    square_dim = conv1.get_shape().as_list()[2]
    rep_centre = tf.concat(1, [tf.concat(2,[centre for _ in range(square_dim)]) for _ in range(square_dim)])
    layer1 = tf.add(conv1, -rep_centre)
    # 2. compute absolute values of the differences (there must be a better way ...)
    conv2 = tf.nn.depthwise_conv2d(layer1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
    # conv2 = tf.nn.conv2d(relu1, conv2_weights, strides=[1, 1, 1, depth1], padding='VALID')
    conv2_shape = conv2.get_shape().as_list()
    layer2 = tf.reshape(conv2, [conv2_shape[0], conv2_shape[1] * conv2_shape[2] * conv2_shape[3] // 2, 2, 1])
    pool1 = tf.nn.max_pool(layer2, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')
    pool1_shape = pool1.get_shape().as_list()
    layer3 = tf.reshape(pool1, [pool1_shape[0], pool1_shape[1] // depth1, depth1, 1])
    # 3. aggregate the absolute pixel differences (first attempt: average)
    out_pool = tf.nn.avg_pool(layer3, ksize=[1, 1, depth1, 1], strides=[1, 1, depth1, 1], padding='SAME')
    out_shape = out_pool.get_shape().as_list()
    print('Dimensions of network Tensors: [minibatch size, ..dims.. , channels]')
    print(data.get_shape().as_list(), '->',
            conv1.get_shape().as_list(), '->', layer1.get_shape().as_list(), '->',
            conv2.get_shape().as_list(), '->', layer2.get_shape().as_list(), '->',
            pool1.get_shape().as_list(), '->', layer3.get_shape().as_list(), '->',
            out_shape)
    return tf.reshape(out_pool, [out_shape[0], out_shape[1]])

  # Create a local session to run this computation.
  with tf.Session() as s:
    # Run all the initializers to prepare the trainable parameters.
    tf.initialize_all_variables().run()
    num_batches = np.floor(n/BATCH_SIZE).astype('int16')
    print('%s: Initialized! (Loading data in %d minibatches)' % (time.ctime(), num_batches+1))
    if gps:
        from get_data.map_coverage import MercatorProjection, G_Point, G_LatLng
        from get_data.labels_GPS import labels_GPS, labels_suspect, labels_GPS_list
        suspect = np.zeros((0,2))
    else:
        suspect = np.zeros((0,LABEL_SIZE**2))

    for step in range(num_batches):
      offset = (step * BATCH_SIZE)
      batch_data = load_dataset(coords=coords[offset:(offset + BATCH_SIZE), :])
      feed_dict = {test_data_node: batch_data,  test_centre_node: get_centre(batch_data)}
      # Run the graph and fetch some of the nodes.
      test_predictions = s.run(model(test_data_node, test_centre_node), feed_dict=feed_dict)
      print('.'),
      if gps:
          suspect = np.concatenate([suspect, labels_GPS_list(labels=np.array(test_predictions), coords=coords[offset:(offset + BATCH_SIZE), :], pixels=PIXELperLABEL, zoom=ZOOM_LEVEL)], axis=0)
      else:
          suspect = np.concatenate([suspect, np.array(test_predictions)], axis=0)
      if (step + 1) % 10 == 0:
          print('%s: Processing batch %d out of %d' % (time.ctime(), step + 1, num_batches + 1))
          np.savetxt('tmp_images/suspect_tmp'+time.ctime()[3:10]+'.csv', suspect,  fmt='%.6f', delimiter=', ')

    if n % BATCH_SIZE > 0:
      offset = num_batches * BATCH_SIZE
      batch_data = load_dataset(coords=coords[offset:n, :])
      feed_dict = {test_data_node2: batch_data, test_centre_node2: get_centre(batch_data)}
      # Run the graph and fetch some of the nodes.
      test_predictions = s.run(model(test_data_node2, test_centre_node2), feed_dict=feed_dict)
      if gps:
          suspect = np.concatenate([suspect, labels_GPS_list(labels=np.array(test_predictions), coords=coords[offset:n, :], pixels=PIXELperLABEL, zoom=ZOOM_LEVEL)], axis=0)
      else:
          suspect = np.concatenate([suspect, np.array(test_predictions)], axis=0)

    # Finally save the result!
    print('%s: Result with %d rows and  %d columns. ' % (time.ctime(), suspect.shape[0], suspect.shape[1]))
    if csv:
      np.savetxt('tmp_images/centre_heat.csv', suspect,  fmt='%.6f', delimiter=', ')
    else:
      return suspect
    #np.savetxt('tmp_images/ids.csv', data_matrix,  fmt='%.6f', delimiter=', ')
    s.close()
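A minimal sketch of the batching pattern used above, assuming the TF 1.x graph API these examples target: n rows are processed in floor(n/BATCH_SIZE) full batches through one placeholder, and the n % BATCH_SIZE leftover rows go through a second, smaller placeholder. The names data_full, predict_full, and make_batch are illustrative stand-ins, not part of the original module; note that the prediction ops are built once before the loop, whereas the example calls model(...) inside s.run on every iteration, which keeps adding ops to the graph.

import numpy as np
import tensorflow as tf

BATCH_SIZE = 32
n = 100                        # stand-in for coords.shape[0]
num_batches = n // BATCH_SIZE
rest = n % BATCH_SIZE

data_full = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 4))
data_rest = tf.placeholder(tf.float32, shape=(rest, 4))
weights = tf.ones([4, 1])

# Build both prediction ops once, before the loop, so the graph stays fixed.
predict_full = tf.matmul(data_full, weights)
predict_rest = tf.matmul(data_rest, weights)

def make_batch(size):          # stand-in for load_dataset(coords=...)
    return np.random.rand(size, 4).astype('float32')

with tf.Session() as s:
    out = np.zeros((0, 1))
    for step in range(num_batches):
        out = np.concatenate(
            [out, s.run(predict_full, {data_full: make_batch(BATCH_SIZE)})], axis=0)
    if rest > 0:               # leftover rows that do not fill a whole batch
        out = np.concatenate(
            [out, s.run(predict_rest, {data_rest: make_batch(rest)})], axis=0)
    print(out.shape)           # (n, 1)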
Example #2
def main(csv=True, coords=None, gps=False):  # pylint: disable=unused-argument
  if coords is None:
    coords = np.genfromtxt('tmp_images/assemble_ids.csv',
                           delimiter=',', skip_header=0)[:, 1:3]
  n = coords.shape[0]

  print('%s: Loading nnet...' % time.ctime())
  # For the validation and test data, we'll just hold the entire dataset in
  # one constant node.
  #test_data_node = tf.constant(data)

  test_data_node = tf.placeholder(
      tf.float32, shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  test_data_node2 = tf.placeholder(
      tf.float32, shape=(n%BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  # The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
  # {tf.initialize_all_variables().run()}
  depth1 = 32
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, depth1],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED))
  conv1_biases = tf.Variable(tf.zeros([depth1]))
  depth2=64
  conv2_weights = tf.Variable(
      tf.truncated_normal([5, 5, depth1, depth2],
                          stddev=0.1,
                          seed=SEED))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[depth2]))
  depth3 = 256  # *koef
  hidden2_size = SINGLE_SIZE // 4  # ((IMAGE_SIZE-4)/2-4)/2
  fc1_weights = tf.Variable(  # fully connected, depth 256; input nodes kept square for future assembly into a larger image
      tf.truncated_normal([hidden2_size, hidden2_size, depth2, depth3],
                          stddev=0.1,
                          seed=SEED))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[depth3]))
  fc2_weights = tf.Variable(
      tf.truncated_normal([1, 1, depth3, NUM_LABELS],
                          stddev=0.1,
                          seed=SEED))
  fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
  # We will replicate the model structure for the training subgraph, as well
  # as the evaluation subgraphs, while sharing the trainable parameters.
  def model(data, train=False):
    """The Model definition."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].

    conv1 = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
    pool1 = tf.nn.max_pool(relu1,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv2 = tf.nn.conv2d(pool1,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    pool2 = tf.nn.max_pool(relu2,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Fully connected layer on pretrained patches 24x24 for single caravan sites.
    hidden_pool = tf.nn.conv2d(pool2,
                        fc1_weights,
                        strides=[1, 1, 1, 1],
                        padding='VALID')
    hidden = tf.nn.relu(tf.nn.bias_add(hidden_pool, fc1_biases))
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    single_pool = tf.nn.conv2d(hidden,
                        fc2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    out_pool = tf.nn.bias_add(single_pool, fc2_biases)
    out_shape = out_pool.get_shape().as_list()
    reshape = tf.reshape(out_pool,
        [out_shape[0] * out_shape[1] * out_shape[2] , out_shape[3]])
    if train:
      print('Dimensions of network Tensors: [minibatch size, ..dims.. , channels]')
      print(data.get_shape().as_list(), '->',
            conv1.get_shape().as_list(), '->', pool1.get_shape().as_list(), '->',
            conv2.get_shape().as_list(), '->', pool2.get_shape().as_list(), '->',
            hidden.get_shape().as_list(), '->', single_pool.get_shape().as_list(), '->',
            out_shape)
    #return tf.nn.bias_add(reshape, assembly_biases)
    out_softmax = tf.nn.softmax(reshape)
    return tf.reshape(out_softmax,[out_shape[0],  out_shape[1] * out_shape[2], out_shape[3]])

  # Create a local session to run this computation.
  with tf.Session() as s:
    # Run all the initializers to prepare the trainable parameters.
    tf.initialize_all_variables().run()
    saver = tf.train.Saver({'conv1_weights':conv1_weights, 'conv1_biases':conv1_biases,
                            'conv2_weights':conv2_weights, 'conv2_biases':conv2_biases,
                            'fc1_weights':fc1_weights, 'fc1_biases':fc1_biases,
                            'fc2_weights':fc2_weights, 'fc2_biases':fc2_biases})
    # Load pretrained parameters for single 24*24 patches.
    saver.restore(s, NET_FILE)
    num_batches = np.floor(n/BATCH_SIZE).astype('int16')
    print('%s: Initialized! (Loading data in %d minibatches)' % (time.ctime(), num_batches+1))
    if gps:
        from get_data.map_coverage import MercatorProjection, G_Point, G_LatLng
        from get_data.labels_GPS import labels_GPS, labels_suspect, labels_GPS_list
        suspect = np.zeros((0,2))
    else:
        suspect = np.zeros((0,LABEL_SIZE**2))

    for step in range(num_batches):
      offset = (step * BATCH_SIZE)
      batch_data = load_dataset(coords=coords[offset:(offset + BATCH_SIZE), :])
      feed_dict = {test_data_node: batch_data}
      # Run the graph and fetch some of the nodes.
      test_predictions = s.run(model(test_data_node), feed_dict=feed_dict)
      print('.'),
      if gps:
          suspect = np.concatenate([suspect, labels_GPS_list(labels= np.array(test_predictions[:,:,1]), coords=coords[offset:(offset + BATCH_SIZE), :], pixels= PIXELperLABEL, zoom=ZOOM_LEVEL)], axis=0 )
      else:
          suspect = np.concatenate([suspect, np.array(test_predictions[:,:,1])], axis=0)
      if (step + 1) % 10 == 0:
          print('%s: Processing batch %d out of %d' % (time.ctime(), step + 1, num_batches + 1))
          np.savetxt('tmp_images/suspect_all'+time.ctime()[3:10]+'.csv', suspect,  fmt='%.6f', delimiter=', ')

    if n % BATCH_SIZE > 0:
      offset = num_batches * BATCH_SIZE
      batch_data = load_dataset(coords=coords[offset:n, :])
      feed_dict = {test_data_node2: batch_data}
      # Run the graph and fetch some of the nodes.
      test_predictions = s.run(model(test_data_node2), feed_dict=feed_dict)
      if gps:
          suspect = np.concatenate([suspect, labels_GPS_list(labels= np.array(test_predictions[:,:,1]), coords=coords[offset:n, :], pixels= PIXELperLABEL, zoom=ZOOM_LEVEL) ], axis=0 )
      else:
          suspect = np.concatenate([suspect, np.array(test_predictions[:,:,1])], axis=0)

    # Finally save the result!
    print('%s: Result with %d rows and  %d columns. ' % (time.ctime(), suspect.shape[0], suspect.shape[1]))
    if csv:
      np.savetxt('tmp_images/assemble1_susp.csv', suspect,  fmt='%.6f', delimiter=', ')
    else:
      return suspect
    #np.savetxt('tmp_images/ids.csv', data_matrix,  fmt='%.6f', delimiter=', ')
    s.close()
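A minimal sketch of the pretrained-weight restore used above, assuming the TF 1.x graph API. tf.train.Saver is given a dict mapping checkpoint names to variables, which is how filters trained on single 24x24 patches can be reloaded into this larger, fully convolutional graph; the variable shapes and the checkpoint path '/tmp/single_patch_net.ckpt' below are illustrative stand-ins for the example's conv/fc weights and NET_FILE.

import tensorflow as tf

w = tf.Variable(tf.truncated_normal([5, 5, 3, 32], stddev=0.1, seed=0))
b = tf.Variable(tf.zeros([32]))
# The dict maps names stored in the checkpoint to variables in this graph,
# so the same pretrained filters can be shared by differently built graphs.
saver = tf.train.Saver({'conv1_weights': w, 'conv1_biases': b})

with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    path = saver.save(s, '/tmp/single_patch_net.ckpt')  # written by the training script
    saver.restore(s, path)                              # reloaded at inference time, as above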
Example #3
def main(csv=True, coords=None, gps=False):  # pylint: disable=unused-argument
    if coords is None:
        coords = np.genfromtxt('tmp_images/assemble_ids.csv',
                               delimiter=',',
                               skip_header=0)[:, 1:3]
    n = coords.shape[0]

    print('%s: Loading nnet...' % time.ctime())
    # For the validation and test data, we'll just hold the entire dataset in
    # one constant node.
    #test_data_node = tf.constant(data)

    test_data_node = tf.placeholder(tf.float32,
                                    shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE,
                                           NUM_CHANNELS))
    test_data_node2 = tf.placeholder(tf.float32,
                                     shape=(n % BATCH_SIZE, IMAGE_SIZE,
                                            IMAGE_SIZE, NUM_CHANNELS))
    # The variables below hold all the trainable weights. They are passed an
    # initial value which will be assigned when we call:
    # {tf.initialize_all_variables().run()}
    depth1 = 32
    conv1_weights = tf.Variable(
        tf.truncated_normal(
            [5, 5, NUM_CHANNELS, depth1],  # 5x5 filter, depth 32.
            stddev=0.1,
            seed=SEED))
    conv1_biases = tf.Variable(tf.zeros([depth1]))
    depth2 = 64
    conv2_weights = tf.Variable(
        tf.truncated_normal([5, 5, depth1, depth2], stddev=0.1, seed=SEED))
    conv2_biases = tf.Variable(tf.constant(0.1, shape=[depth2]))
    depth3 = 256  # *koef
    hidden2_size = SINGLE_SIZE // 4  # ((IMAGE_SIZE-4)/2-4)/2
    fc1_weights = tf.Variable(  # fully connected, depth 256; input nodes kept square for future assembly into a larger image
        tf.truncated_normal([hidden2_size, hidden2_size, depth2, depth3],
                            stddev=0.1,
                            seed=SEED))
    fc1_biases = tf.Variable(tf.constant(0.1, shape=[depth3]))
    fc2_weights = tf.Variable(
        tf.truncated_normal([1, 1, depth3, NUM_LABELS], stddev=0.1, seed=SEED))
    fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))

    # We will replicate the model structure for the training subgraph, as well
    # as the evaluation subgraphs, while sharing the trainable parameters.
    def model(data, train=False):
        """The Model definition."""
        # 2D convolution, with 'SAME' padding (i.e. the output feature map has
        # the same size as the input). Note that {strides} is a 4D array whose
        # shape matches the data layout: [image index, y, x, depth].

        conv1 = tf.nn.conv2d(data,
                             conv1_weights,
                             strides=[1, 1, 1, 1],
                             padding='SAME')
        # Bias and rectified linear non-linearity.
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
        # Max pooling. The kernel size spec {ksize} also follows the layout of
        # the data. Here we have a pooling window of 2, and a stride of 2.
        pool1 = tf.nn.max_pool(relu1,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
        conv2 = tf.nn.conv2d(pool1,
                             conv2_weights,
                             strides=[1, 1, 1, 1],
                             padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        pool2 = tf.nn.max_pool(relu2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
        # Fully connected layer on pretrained patches 24x24 for single caravan sites.
        hidden_pool = tf.nn.conv2d(pool2,
                                   fc1_weights,
                                   strides=[1, 1, 1, 1],
                                   padding='VALID')
        hidden = tf.nn.relu(tf.nn.bias_add(hidden_pool, fc1_biases))
        # Add a 50% dropout during training only. Dropout also scales
        # activations such that no rescaling is needed at evaluation time.
        if train:
            hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
        single_pool = tf.nn.conv2d(hidden,
                                   fc2_weights,
                                   strides=[1, 1, 1, 1],
                                   padding='SAME')
        out_pool = tf.nn.bias_add(single_pool, fc2_biases)
        out_shape = out_pool.get_shape().as_list()
        reshape = tf.reshape(
            out_pool,
            [out_shape[0] * out_shape[1] * out_shape[2], out_shape[3]])
        if train:
            print(
                'Dimensions of network Tensors: [minibatch size, ..dims.. , channels]'
            )
            print(data.get_shape().as_list(), '->',
                  conv1.get_shape().as_list(), '->',
                  pool1.get_shape().as_list(), '->',
                  conv2.get_shape().as_list(), '->',
                  pool2.get_shape().as_list(), '->',
                  hidden.get_shape().as_list(), '->',
                  single_pool.get_shape().as_list(), '->', out_shape)
        #return tf.nn.bias_add(reshape, assembly_biases)
        out_softmax = tf.nn.softmax(reshape)
        return tf.reshape(
            out_softmax,
            [out_shape[0], out_shape[1] * out_shape[2], out_shape[3]])

    # Create a local session to run this computation.
    with tf.Session() as s:
        # Run all the initializers to prepare the trainable parameters.
        tf.initialize_all_variables().run()
        saver = tf.train.Saver({
            'conv1_weights': conv1_weights,
            'conv1_biases': conv1_biases,
            'conv2_weights': conv2_weights,
            'conv2_biases': conv2_biases,
            'fc1_weights': fc1_weights,
            'fc1_biases': fc1_biases,
            'fc2_weights': fc2_weights,
            'fc2_biases': fc2_biases
        })
        # Load pretrained parameters for single 24*24 patches.
        saver.restore(s, NET_FILE)
        num_batches = np.floor(n / BATCH_SIZE).astype('int16')
        print('%s: Initialized! (Loading data in %d minibatches)' %
              (time.ctime(), num_batches + 1))
        if gps:
            from get_data.map_coverage import MercatorProjection, G_Point, G_LatLng
            from get_data.labels_GPS import labels_GPS, labels_suspect, labels_GPS_list
            suspect = np.zeros((0, 2))
        else:
            suspect = np.zeros((0, LABEL_SIZE**2))

        for step in range(num_batches):
            offset = (step * BATCH_SIZE)
            batch_data = load_dataset(coords=coords[offset:(offset +
                                                            BATCH_SIZE), :])
            feed_dict = {test_data_node: batch_data}
            # Run the graph and fetch some of the nodes.
            test_predictions = s.run(model(test_data_node),
                                     feed_dict=feed_dict)
            print('.'),
            if gps:
                suspect = np.concatenate([
                    suspect,
                    labels_GPS_list(
                        labels=np.array(test_predictions[:, :, 1]),
                        coords=coords[offset:(offset + BATCH_SIZE), :],
                        pixels=PIXELperLABEL,
                        zoom=ZOOM_LEVEL)
                ],
                                         axis=0)
            else:
                suspect = np.concatenate(
                    [suspect, np.array(test_predictions[:, :, 1])], axis=0)
            if (step + 1) % 10 == 0:
                print('%s: Processing batch %d out of %d' %
                      (time.ctime(), step + 1, num_batches + 1))
                np.savetxt('tmp_images/suspect_all' + time.ctime()[3:10] +
                           '.csv',
                           suspect,
                           fmt='%.6f',
                           delimiter=', ')

        if n % BATCH_SIZE > 0:
            offset = num_batches * BATCH_SIZE
            batch_data = load_dataset(coords=coords[offset:n, :])
            feed_dict = {test_data_node2: batch_data}
            # Run the graph and fetch some of the nodes.
            test_predictions = s.run(model(test_data_node2),
                                     feed_dict=feed_dict)
            if gps:
                suspect = np.concatenate([
                    suspect,
                    labels_GPS_list(labels=np.array(test_predictions[:, :, 1]),
                                    coords=coords[offset:n, :],
                                    pixels=PIXELperLABEL,
                                    zoom=ZOOM_LEVEL)
                ],
                                         axis=0)
            else:
                suspect = np.concatenate(
                    [suspect, np.array(test_predictions[:, :, 1])], axis=0)

        # Finally save the result!
        print('%s: Result with %d rows and  %d columns. ' %
              (time.ctime(), suspect.shape[0], suspect.shape[1]))
        if csv:
            np.savetxt('tmp_images/assemble1_susp.csv',
                       suspect,
                       fmt='%.6f',
                       delimiter=', ')
        else:
            return suspect
        #np.savetxt('tmp_images/ids.csv', data_matrix,  fmt='%.6f', delimiter=', ')
        s.close()
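The output reshaping at the end of model() can be read as "softmax per patch": the fully convolutional head produces one logit vector per spatial position, softmax is applied row-wise after flattening, and [:, :, 1] then reads off the positive-class probability for every patch. A small numpy sketch of that bookkeeping, with illustrative shapes (B=2, H=W=3, NUM_LABELS=2):

import numpy as np

B, H, W, NUM_LABELS = 2, 3, 3, 2
logits = np.random.randn(B, H, W, NUM_LABELS)          # out_pool in the example

flat = logits.reshape(B * H * W, NUM_LABELS)           # one row of logits per patch
e = np.exp(flat - flat.max(axis=1, keepdims=True))
softmax = e / e.sum(axis=1, keepdims=True)

per_patch = softmax.reshape(B, H * W, NUM_LABELS)      # [image, patch, class]
suspect_prob = per_patch[:, :, 1]                      # P(class 1) for every patch
print(suspect_prob.shape)                              # (2, 9)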
Example #4
def main(csv=True, coords=None, gps=False):  # pylint: disable=unused-argument
    if coords is None:
        coords = np.array([[-1.353887, 50.965639], [-1.386731, 50.935744],
                           [-1.401907, 50.925657], [-1.368191, 50.914509],
                           [-1.354101, 50.90988], [-1.34853, 50.902636],
                           [-0.964885, 50.81373]])
    n = coords.shape[0]

    print('%s: Loading nnet...' % time.ctime())
    # For the validation and test data, we'll just hold the entire dataset in
    # one constant node.
    #test_data_node = tf.constant(data)
    test_data_node = tf.placeholder(tf.float32,
                                    shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE,
                                           NUM_CHANNELS))
    test_data_node2 = tf.placeholder(tf.float32,
                                     shape=(n % BATCH_SIZE, IMAGE_SIZE,
                                            IMAGE_SIZE, NUM_CHANNELS))
    test_centre_node = tf.placeholder(tf.float32,
                                      shape=(BATCH_SIZE, SINGLE_SIZE,
                                             SINGLE_SIZE, NUM_CHANNELS))
    test_centre_node2 = tf.placeholder(tf.float32,
                                       shape=(n % BATCH_SIZE, SINGLE_SIZE,
                                              SINGLE_SIZE, NUM_CHANNELS))

    # The tensors below hold the model weights. In this example they are fixed
    # constants rather than trainable variables, so they need no initial value
    # assignment from {tf.initialize_all_variables().run()}.
    depth1 = SINGLE_SIZE * SINGLE_SIZE * NUM_CHANNELS
    conv1_weights = tf.constant(  # shape: [SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS, depth1]
        np.reshape(np.eye(depth1, dtype='float32'),
                   [SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS, depth1]))
    #centre_weights =tf.constant( # shape: [IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS, depth1]
    #    get_centre(depth1))
    #conv1_biases = tf.constant(np.zeros([depth1], dtype='float32'))
    # conv2_weights = tf.Variable(tf.concat(3,[tf.ones([1,1,depth1,1])*2*(.5-i%2) for i in range(2*depth1)]))
    conv2_weights = tf.concat(
        3, [tf.ones([1, 1, depth1, 1]), -tf.ones([1, 1, depth1, 1])])
    kernel_weights = tf.ones(
        [SINGLE_SIZE, SINGLE_SIZE, NUM_CHANNELS,
         1])  # gaussian or linear kernel to use instead of avg_pool

    def model(data, centre_data, train=False):
        """The Model definition."""
        # 1. extract features from centre and every (other) patch in the same way (first attempt: identity)
        conv1 = tf.nn.conv2d(data,
                             conv1_weights,
                             strides=[1, PIXELperLABEL, PIXELperLABEL, 1],
                             padding='VALID')
        centre = tf.nn.conv2d(centre_data,
                              conv1_weights,
                              strides=[1, PIXELperLABEL, PIXELperLABEL, 1],
                              padding='VALID')
        square_dim = conv1.get_shape().as_list()[2]
        rep_centre = tf.concat(1, [
            tf.concat(2, [centre for _ in range(square_dim)])
            for _ in range(square_dim)
        ])
        layer1 = tf.add(conv1, -rep_centre)
        # 2. compute absolute values of the differences (there must be a better way ...)
        conv2 = tf.nn.depthwise_conv2d(layer1,
                                       conv2_weights,
                                       strides=[1, 1, 1, 1],
                                       padding='SAME')
        # conv2 = tf.nn.conv2d(relu1, conv2_weights, strides=[1, 1, 1, depth1], padding='VALID')
        conv2_shape = conv2.get_shape().as_list()
        layer2 = tf.reshape(conv2, [
            conv2_shape[0],
            conv2_shape[1] * conv2_shape[2] * conv2_shape[3] // 2, 2, 1
        ])
        pool1 = tf.nn.max_pool(layer2,
                               ksize=[1, 1, 2, 1],
                               strides=[1, 1, 2, 1],
                               padding='SAME')
        pool1_shape = pool1.get_shape().as_list()
        layer3 = tf.reshape(
            pool1, [pool1_shape[0], pool1_shape[1] // depth1, depth1, 1])
        # 3. aggregate the absolute pixel differences (first attempt: average)
        out_pool = tf.nn.avg_pool(layer3,
                                  ksize=[1, 1, depth1, 1],
                                  strides=[1, 1, depth1, 1],
                                  padding='SAME')
        out_shape = out_pool.get_shape().as_list()
        print(
            'Dimensions of network Tensors: [minibatch size, ..dims.. , channels]'
        )
        print(data.get_shape().as_list(), '->',
              conv1.get_shape().as_list(), '->',
              layer1.get_shape().as_list(), '->',
              conv2.get_shape().as_list(), '->',
              layer2.get_shape().as_list(), '->',
              pool1.get_shape().as_list(), '->',
              layer3.get_shape().as_list(), '->', out_shape)
        return tf.reshape(out_pool, [out_shape[0], out_shape[1]])

    # Create a local session to run this computation.
    with tf.Session() as s:
        # Run all the initializers to prepare the trainable parameters.
        tf.initialize_all_variables().run()
        num_batches = np.floor(n / BATCH_SIZE).astype('int16')
        print('%s: Initialized! (Loading data in %d minibatches)' %
              (time.ctime(), num_batches + 1))
        if gps:
            from get_data.map_coverage import MercatorProjection, G_Point, G_LatLng
            from get_data.labels_GPS import labels_GPS, labels_suspect, labels_GPS_list
            suspect = np.zeros((0, 2))
        else:
            suspect = np.zeros((0, LABEL_SIZE**2))

        for step in range(num_batches):
            offset = (step * BATCH_SIZE)
            batch_data = load_dataset(coords=coords[offset:(offset +
                                                            BATCH_SIZE), :])
            feed_dict = {
                test_data_node: batch_data,
                test_centre_node: get_centre(batch_data)
            }
            # Run the graph and fetch some of the nodes.
            test_predictions = s.run(model(test_data_node, test_centre_node),
                                     feed_dict=feed_dict)
            print('.'),
            if gps:
                suspect = np.concatenate([
                    suspect,
                    labels_GPS_list(
                        labels=np.array(test_predictions),
                        coords=coords[offset:(offset + BATCH_SIZE), :],
                        pixels=PIXELperLABEL,
                        zoom=ZOOM_LEVEL)
                ],
                                         axis=0)
            else:
                suspect = np.concatenate(
                    [suspect, np.array(test_predictions)], axis=0)
            if (step + 1) % 10 == 0:
                print('%s: Processing batch %d out of %d' %
                      (time.ctime(), step + 1, num_batches + 1))
                np.savetxt('tmp_images/suspect_tmp' + time.ctime()[3:10] +
                           '.csv',
                           suspect,
                           fmt='%.6f',
                           delimiter=', ')

        if n % BATCH_SIZE > 0:
            offset = num_batches * BATCH_SIZE
            batch_data = load_dataset(coords=coords[offset:n, :])
            feed_dict = {
                test_data_node2: batch_data,
                test_centre_node2: get_centre(batch_data)
            }
            # Run the graph and fetch some of the nodes.
            test_predictions = s.run(model(test_data_node2, test_centre_node2),
                                     feed_dict=feed_dict)
            if gps:
                suspect = np.concatenate([
                    suspect,
                    labels_GPS_list(labels=np.array(test_predictions),
                                    coords=coords[offset:n, :],
                                    pixels=PIXELperLABEL,
                                    zoom=ZOOM_LEVEL)
                ],
                                         axis=0)
            else:
                suspect = np.concatenate(
                    [suspect, np.array(test_predictions)], axis=0)

        # Finally save the result!
        print('%s: Result with %d rows and  %d columns. ' %
              (time.ctime(), suspect.shape[0], suspect.shape[1]))
        if csv:
            np.savetxt('tmp_images/centre_heat.csv',
                       suspect,
                       fmt='%.6f',
                       delimiter=', ')
        else:
            return suspect
        #np.savetxt('tmp_images/ids.csv', data_matrix,  fmt='%.6f', delimiter=', ')
        s.close()
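The depthwise convolution plus 1x2 max pool inside model() is an absolute-value trick: the +1/-1 filter pair in conv2_weights turns each per-feature difference x into the pair (x, -x), and the max over that pair is |x| = max(x, -x); the avg_pool that follows then averages these magnitudes per patch. A small numpy sketch of the identity, with an illustrative difference vector:

import numpy as np

diff = np.random.randn(6)                  # per-feature differences (layer1)
pairs = np.stack([diff, -diff], axis=1)    # what the +1/-1 depthwise filters produce
abs_via_max = pairs.max(axis=1)            # what the 1x2 max_pool then selects

assert np.allclose(abs_via_max, np.abs(diff))
print(abs_via_max)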