def cascade(screenpos, i, nletters):
    """Build a per-letter "cascade" animation position function.

    Letter *i* starts 400 px above its final spot and falls in with a
    damped-oscillation profile; letters start one after another with a
    0.15 s stagger.  *nletters* is unused here — presumably kept so all
    animators share the same signature (TODO confirm against callers).

    Returns a function t -> 2-vector screen position.
    """
    fall = np.array([0, -1])
    delay = 0.15 * i

    def damping(t):
        # Before the letter's start time the offset is full (factor 1);
        # afterwards a decaying sinc gives the settling wobble.
        if t < 0:
            return 1
        return abs(np.sinc(t) / (1 + t**4))

    def position(t):
        return screenpos + fall * 400 * damping(t - delay)

    return position
def arrive(screenpos, i, nletters):
    """Build a per-letter "arrive" animation position function.

    Letter *i* slides in horizontally from the right (it starts
    3 * 400 px right of its final spot and reaches it at t = 1),
    staggered by 0.2 s per letter.  *nletters* is unused — presumably
    kept so all animators share the same signature (TODO confirm).

    Returns a function t -> 2-vector screen position.

    Fix: removed an unreachable ``return tf.train.Feature(...)`` line
    after the return statement — stray dead code referencing an
    undefined ``tf`` name.
    """
    v = np.array([-1, 0])

    def d(t):
        # Linear ramp: 3 at t = 0, hits 0 at t = 1 and stays clamped.
        return max(0, 3 - 3 * t)

    return lambda t: screenpos - 400 * v * d(t - 0.2 * i)


def _int64_feature(value):
    """Wrap a single integer *value* as a ``tf.train.Feature`` (int64_list)."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)


tfrecords_filename = 'pascal_voc_segmentation.tfrecords'

writer = tf.python_io.TFRecordWriter(tfrecords_filename)

# Let's collect the real images to later on compare to the reconstructed ones
original_images = []

# NOTE(review): filename_pairs must be defined earlier in the full script —
# presumably (image_path, annotation_path) tuples; verify against the caller.
for img_path, annotation_path in filename_pairs:
    img = np.array(Image.open(img_path))
    annotation = np.array(Image.open(annotation_path))

    # The reason to store image sizes was demonstrated
    # in the previous example -- we have to know sizes of images
    # to later read raw serialized string, convert to 1d array and convert to
    # respective shape that image used to have.

    height = img.shape[0]
    width = img.shape[1]

    # Put in the original images into array
    # Just for future check for correctness
    original_images.append((img, annotation))

    # Fix: np.ndarray.tostring() was deprecated and removed in NumPy 1.23;
    # tobytes() returns the exact same raw bytes.
    img_raw = img.tobytes()
""" Exp Konvergenz """

N_max = 1000            #Anz Teilintervalle
N = np.arange(N_max + 1)

err = np.zeros(N_max)

for i in range(N_max):
    err[i] = abs(I_exact - QF(f, a, b, N[i]))

plt.loglog(N, err)
plt.loglog(N, N**(-2))

p = - np.polyfit(np.log(N), np.log(err), 1)[0]


""" Mehrdimensionale Quadratur """ 

#Intervalle [a,b] und [c,d] in 2-dim

F = lambda y: QF(lambda x: f(x,y), a, b, N)
I = QF(F, c, d, Ny)

#Reference Lösung aus SciPy
I_exact = scipy.integrate.nquad(f, np.array([[a, b], [c, d]]))[0]




def rotMatrix(a):
    """Return the 2x2 rotation matrix for angle *a* (radians).

    Rotates row vectors clockwise (equivalently, column vectors
    counter-clockwise by -a): [[cos a, sin a], [-sin a, cos a]].

    Fix: the original passed the second row as np.array's second
    positional argument (dtype), which raises a TypeError; both rows
    must be nested inside a single outer list.
    """
    return np.array([[np.cos(a), np.sin(a)],
                     [-np.sin(a), np.cos(a)]])