Example No. 1
import math

import matplotlib.pyplot as plt
from librosa import display


def spectrogram(stft,
                window_size,
                overlap,
                fs,
                y='linear',
                freq_subset: tuple = None,
                c_bar=None):

    # hop length in samples; overlap is given as a fraction of the window size
    hop_len = int(window_size * (1 - overlap))

    display.specshow(stft, y_axis=y, sr=fs, hop_length=hop_len)

    if isinstance(c_bar, str):
        plt.colorbar(format="%.2f " + "{}".format(c_bar))

    if freq_subset:
        hz_per_bin = (fs / 2) / (1 + window_size / 2)
        locs, labels = plt.yticks()
        c = hz_per_bin * math.floor(freq_subset[0] / hz_per_bin)
        d = hz_per_bin * math.ceil(freq_subset[1] / hz_per_bin)
        new_labels = [
            "%.2f" % map_range(locs[i], locs[0], locs[-1], c, d)
            for i in range(len(locs))
        ]
        plt.yticks(locs, new_labels)

    return plt.gca()
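All of the examples on this page rely on a helper called map_range whose definition is not included. A minimal sketch, assuming it performs a plain linear rescale: Examples 1 and 6 pass explicit input and output bounds, while Examples 2, 4 and 5 appear to call a shorter variant that rescales from the data's own range, so both hypothetical forms are sketched below (map_range_norm is an invented name for the second one).

import numpy as np


def map_range(x, in_min, in_max, out_min, out_max):
    # linearly map x from [in_min, in_max] to [out_min, out_max]
    return out_min + (x - in_min) * (out_max - out_min) / (in_max - in_min)


def map_range_norm(x, low=0.0, high=1.0):
    # rescale an array from its own min/max into [low, high]
    x = np.asarray(x, dtype='float32')
    return low + (x - x.min()) * (high - low) / (x.max() - x.min() + 1e-12)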
Example No. 2
def create_images(opt):
    #  preprocess = create_preprocess(opt)
    net = load_pretrained(opt)
    if (len(opt.ldr) == 1) and os.path.isdir(opt.ldr[0]):
        # Treat this as a directory of ldr images
        opt.ldr = [
            os.path.join(opt.ldr[0], f)
            for f in os.listdir(opt.ldr[0])
            if any(f.lower().endswith(x) for x in opt.ldr_extensions)
        ]
    for ldr_file in opt.ldr:
        loaded = cv2.imread(
            ldr_file, flags=cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR
        )
        if loaded is None:
            print('Could not load {0}'.format(ldr_file))
            continue
        ldr_input = preprocess(loaded, opt)
        if opt.resize:
            out_name = create_name(
                ldr_file, 'resized', 'jpg', opt.out, opt.tag
            )
            cv2.imwrite(out_name, (ldr_input * 255).astype('uint8'))

        t_input = cv2torch(ldr_input)
        if opt.use_gpu:
            net.cuda()
            t_input = t_input.cuda()
        prediction = map_range(
            torch2cv(net.predict(t_input, opt.patch_size).cpu()), 0, 1
        )

        extension = 'exr' if opt.use_exr else 'hdr'
        out_name = create_name(
            ldr_file, 'prediction', extension, opt.out, opt.tag
        )
        print(f'Writing {out_name}')
        cv2.imwrite(out_name, prediction)
        if opt.tone_map is not None:
            tmo_img = tone_map(
                prediction, opt.tone_map, **create_tmo_param_from_args(opt)
            )
            out_name = create_name(
                ldr_file,
                'prediction_{0}'.format(opt.tone_map),
                'jpg',
                opt.out,
                opt.tag,
            )
            cv2.imwrite(out_name, (tmo_img * 255).astype('uint8'))
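A hedged usage sketch for create_images: opt is whatever argparse-style namespace the surrounding script builds. The field names below are limited to the attributes the function (and preprocess in Example No. 5) actually reads; the values are illustrative, and any checkpoint field required by load_pretrained or extra fields read by create_tmo_param_from_args are not shown.

from types import SimpleNamespace

opt = SimpleNamespace(
    ldr=['inputs/'],                      # a directory of LDR images, or a list of files
    ldr_extensions=['.jpg', '.png'],      # accepted extensions when ldr is a directory
    resize=True, width=1024, height=768,  # used by preprocess (Example No. 5)
    out='results/', tag=None,             # used by create_name to build output paths
    use_gpu=False, patch_size=256,
    use_exr=False,                        # write .hdr instead of .exr
    tone_map=None,                        # or a TMO name to also write a tone-mapped JPEG
)
# create_images(opt)  # assumes load_pretrained can locate its weights from opt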
Example No. 3
def lloyd_mesh(im, cells=10, itter=10):
    '''
    Returns an array of random point coordinates whose shapes are somewhat uniform.

    This algorithm implements k-means (Lloyd's) clustering.

    Arguments:

    im: the image to work with.

    cells: the % of cells to have on the screen; 100 means about .75% of the image,
    as 100% would be completely full.

    itter: the number of iterations used to cluster the regions.
    '''
    assert cells >= 1 and cells <= 100, 'tiles cannot be larger than 100% of the image'
    assert itter > 0 and itter < 50, 'invalid iteration count (in case someone passes infinity)'

    # copy the image to prevent in-place operations
    img = im.copy()

    # map the percentage onto a cluster count: 1% gives max(width, height) clusters, 100% gives one
    cells = int(map_range(cells, 1, 100, max(*im.shape[:2]), 1))

    # get initial random points. Do not include the edges, as k-means would disrupt them.
    points = random_pts(im, edges=False)
    # create the clusters
    # Define criteria = ( type, max_iter = 10 by default, epsilon = 1.0 )
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, itter, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    # Apply KMeans
    _, labels, centers = cv2.kmeans(np.float32(points), cells, None, criteria,
                                    itter, flags)
    # Optionally plot the clusters to see how the points are now grouped into n regions:
    # for i in range(cells):
    #     plt.scatter(points[labels.ravel() == i][:, 0], points[labels.ravel() == i][:, 1])
    # plt.show()
    print('--add done: sent for display---')
    return add_edges(centers, *im.shape[:2])
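Both mesh examples finish with add_edges(points, height, width), which is not shown here. A minimal sketch, assuming the helper appends the frame corners and border midpoints (as (x, y) pairs) so that a later triangulation reaches the image borders without clipping:

import numpy as np


def add_edges(points, height, width):
    # hypothetical helper: append the corners and edge midpoints of the frame
    border = np.array([
        [0, 0], [width - 1, 0], [0, height - 1], [width - 1, height - 1],
        [width // 2, 0], [width // 2, height - 1],
        [0, height // 2], [width - 1, height // 2],
    ], dtype=int)
    return np.append(np.asarray(points, dtype=int), border, axis=0)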
Example No. 4
def transform(hdr):
    hdr = slice_gauss(hdr, crop_size=(384, 384), precision=(0.1, 1))
    hdr = cv2.resize(hdr, (256, 256))
    hdr = map_range(hdr)
    ldr = random_tone_map(hdr)
    return cv2torch(ldr), cv2torch(hdr)
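A hedged usage sketch: transform looks like the per-sample augmentation of an HDR training pipeline, so it would typically sit inside a torch Dataset. HdrDataset below is a hypothetical stand-in for whatever loader the project actually uses.

import cv2
from torch.utils.data import Dataset


class HdrDataset(Dataset):
    # hypothetical wrapper showing where transform() would be applied
    def __init__(self, hdr_paths):
        self.hdr_paths = hdr_paths

    def __len__(self):
        return len(self.hdr_paths)

    def __getitem__(self, idx):
        hdr = cv2.imread(self.hdr_paths[idx],
                         flags=cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
        return transform(hdr)  # (LDR input tensor, HDR target tensor)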
Example No. 5
def preprocess(x, opt):
    x = x.astype('float32')
    if opt.resize:
        x = resize(x, size=(opt.width, opt.height))
    x = map_range(x)
    return x
Example No. 6
def circles(im, s=.6, angle=0, queue=None):
    '''
    Creates circular tiles of the image.
    Inspired by: https://www.gettyimages.ie/detail/illustration/monochrome-halftone-dots-wavy-pattern-royalty-free-illustration/626917876

    BUG: The rotation isn't working as intended on the real image.

    Arguments:

    im: the image to work with.
    s: the % of spacing between the tiles.
    angle: rotation in degrees applied to each tile's centre.
    queue: optional queue that receives the (label, image) pair when running concurrently.
    '''

    s = s if s > 0 and s < 50 else 2
    img = im.copy()
    # work only with the grayscale values
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # equalize the histogram to increase contrast
    img = cv2.equalizeHist(img)
    # set the maximum radius size as a % of the image to keep the proportions
    height, width = img.shape[:2]

    s = int(map_range(s, 0, 50, 0, max(width, height)))

    canvas = 255 * np.ones_like(img)

    for i in range(0, height, s):
        for j in range(0, width, s):
            # go s steps at a time and get the average color in each region.
            y1, y2, x1, x2 = i, i + s, j, j + s
            # maybe rotate 45 degrees here, then calculate the mean at 45 degrees?
            avg = img[y1:y2, x1:x2].mean(axis=0).mean(axis=0)
            # map the average onto a radius between s // 2 (dark) and 2 (bright):
            # values closer to white get a smaller radius
            radius = int(map_range(avg, 0, 255, s // 2, 2))
            # The fill color avoids pure black and pure white for aesthetic reasons,
            # so map it as well: this crops the range from [0, 255] to [5, 200]
            fill_color = int(map_range(avg, 0, 255, 5, 200))
            # the center of the circle
            x, y = (x1 + x2) // 2, (y2 + y1) // 2
            # if the mat has to be rotated
            if angle % 360 != 0:
                # rotate the coordinates by the requested angle
                x, y = rotate(x - s, y, x, y, angle)
            # draw the circles at this place
            cv2.circle(canvas, (x, y), radius, fill_color, -1)
    canvas = cv2.addWeighted(canvas, .9, img, .1, 2)
    blend_used, canvas = gradient_blend(canvas)

    # if running concurrently, send the items to the queue
    if queue:
        label = 'Circles: '
        label += 'Steps: {} '.format(s)
        label += 'Angle: {} '.format(angle)
        label += 'Blend: {} '.format(
            blend_used) if blend_used != '' else ''
        queue.put((label, canvas))
    else:
        # otherwise, return the image instead.
        return canvas
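The rotate helper used above is not shown. A minimal sketch, assuming it rotates the point (px, py) around the centre (cx, cy) by an angle given in degrees:

import math


def rotate(px, py, cx, cy, angle_deg):
    # hypothetical helper: rotate (px, py) around (cx, cy) by angle_deg degrees
    a = math.radians(angle_deg)
    dx, dy = px - cx, py - cy
    rx = cx + dx * math.cos(a) - dy * math.sin(a)
    ry = cy + dx * math.sin(a) + dy * math.cos(a)
    return int(rx), int(ry)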
Example No. 7
def fib_mesh(im, step):
    '''
    Returns an array of points computed with the golden ratio.

    This algorithm uses a precomputed Fibonacci sequence; it does not recalculate it.

    Arguments:

    im: the image to work with.

    step: defines the cell size to use; must be > 0 and <= 50.
    Beware: values below 10 are probably too small for high-resolution images,
    and 100 would mean skipping the whole image at a time, which is not useful.
    '''
    # reject non-positive steps and steps above 50%
    assert step > 0 and step <= 50

    # copy the image to prevent inplace operations
    img = im.copy()
    height, width = img.shape[:2]

    # convert to grayscale if needed
    if len(img.shape) > 2:
        img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    # convert the step percentage into pixels so the cell size scales with the resolution
    s = (step * width) // 100

    # start with an empty (0, 2) array so no spurious (0, 0) point is included
    points = np.empty((0, 2), dtype=int)

    # Use this slightly modified Fibonacci sequence: dark areas (pixel value ~ 0)
    # are assigned a larger index into fibonacci_ish, so they receive more points
    # (max = 144 for 0) and therefore smaller triangles, while bright areas
    # receive fewer points (min = 1 for 255) and therefore larger triangles.

    # The sequence is modified by dropping the first two terms to give a more
    # evenly spread distribution for these unevenly distributed needs.
    fibonacci_ish = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
    # TODO: divide the image into CHUNK-sized regions and send the chunks to each of
    # N threads to compute these points concurrently and speed things up.
    # CHUNK_SIZE = 100  # any image larger than 100 * 100 would be split into 100 * 100 chunks
    # from threading import Thread
    # from queue import Queue
    # from multiprocessing import Lock  (so only one thread at a time appends its chunk's points)
    # q = Queue()
    # def work_a_chunk(w, h, q):
    #     pass

    # or be lazy: to keep things simple!
    for i in range(0, height, s):
        for j in range(0, width, s):
            # go s steps at a time and get the average color in each region.
            y1, y2, x1, x2 = i, i + s, j, j + s
            avg = img[y1:y2, x1:x2].mean(axis=0).mean(axis=0)
            # map this average color to the number of points to draw in this region:
            # the darker the average, the closer the index is to len(fibonacci_ish) - 1,
            # which selects a larger Fibonacci number and therefore more random
            # points in that region.
            num_pts = int(map_range(avg, 0, 255, len(fibonacci_ish) - 1, 0))
            # now draw fibonacci_ish[num_pts] points from this region.
            new_points = randrange_pts_2d(x1, x2, y1, y2,
                                          fibonacci_ish[num_pts])
            # append these points to the array.
            # REMINDER: this would be the critical section if run concurrently.
            points = np.append(points, new_points, axis=0)

    # add the edges to prevent clipping. and we are done!
    return add_edges(points, *im.shape[:2])
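The randrange_pts_2d helper used above is not shown. A minimal sketch, assuming it draws n uniformly random (x, y) points inside the rectangle [x1, x2) x [y1, y2):

import numpy as np


def randrange_pts_2d(x1, x2, y1, y2, n):
    # hypothetical helper: n random integer points inside the given rectangle
    xs = np.random.randint(x1, x2, size=(n, 1))
    ys = np.random.randint(y1, y2, size=(n, 1))
    return np.hstack((xs, ys))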
Example No. 8
def grade_crime_distance(d):

    if d < (quarter_mile / 2):
        d = (quarter_mile / 2)

    return 0.5 - map_range(d, (quarter_mile / 2), crime_eval_distance, 0, 0.5)
Example No. 9
def grade_crime_time(sec):
    if sec < one_week_seconds:
        sec = one_week_seconds

    return map_range(sec, one_week_seconds, crime_eval_period_seconds, 0, 0.5)
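Both grading functions read module-level constants that the snippets do not show. A hedged sketch of plausible definitions; the units (metres and seconds) and the two evaluation cut-offs are assumptions, not the project's actual settings:

quarter_mile = 402.336                              # a quarter mile in metres
crime_eval_distance = 4 * quarter_mile              # assumed outer distance for scoring
one_week_seconds = 7 * 24 * 3600                    # 604,800 seconds
crime_eval_period_seconds = 52 * one_week_seconds   # assumed one-year look-back window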