def filter_rgb_to_grayscale(image):
    """Convert a PIL RGB image to 8-bit grayscale ("L" mode), logging the time taken."""
    t0 = time.time()
    grayscale = image.convert(mode="L")
    log.print_debug("RGB converted to Gray scale || Time Elapsed: " +
                    str(time.time() - t0))
    return grayscale
def load_datasets(*sampSizes):
    """Load the cropped datasets for every requested sample size.

    Returns three parallel lists: resized images (INPUT_SIZE x INPUT_SIZE
    uint8 arrays), class indices into CATEGORIES, and patient identifiers.
    Images that are mostly white are skipped with a warning.
    """
    x, y, p = [], [], []
    for ss in sampSizes:
        log.print_debug("Opening Cropped dataset " + str(ss))
        folder = path.join(CROP_FOLDER, str(ss))
        for filename in os.listdir(folder):
            try:
                # Filenames encode "<patient>_<class>_...".
                patient = filename.split("_")[0]
                category = CATEGORIES.index(str(filename.split("_")[1]))
                img = Image.open(path.join(folder, filename)).convert('RGB')
                if filter.check_valid(np.asarray(img, np.uint8)):
                    resized = img.resize((int(INPUT_SIZE), int(INPUT_SIZE)),
                                         Image.LANCZOS)
                    x.append(np.asarray(resized))
                    y.append(category)
                    p.append(patient)
                else:
                    log.print_warning("Img " + filename +
                                      " not loaded: too much white")
            except Exception:
                log.print_error("Cannot load image " + filename)
    return x, y, p
# Example #3
def image_to_np_rgb(image):
    """Convert a PIL image into an int32 numpy array, logging the time taken."""
    t0 = time.time()
    as_array = np.asarray(image, dtype="int32")
    log.print_debug("Image converted to array || Time Elapsed: " +
                    str(time.time() - t0))
    return as_array
# Example #4
def save_image(image, folder, filename):
    """Save *image* as PNG in *folder* under *filename* (".png" is appended)."""
    t0 = time.time()
    # Local renamed from `path` to avoid shadowing the os.path alias.
    out_path = os.path.join(str(folder), str(filename + ".png"))
    image.save(out_path, "PNG")
    log.print_debug("Image " + str(out_path) + " saved || Time Elapsed: " +
                    str(time.time() - t0))
def complement_np(np_image):
    """Return the photographic negative (255 - value) of an image array."""
    t0 = time.time()
    complemented = 255 - np_image
    log.print_debug("Image complemented || Shape: " +
                    str(complemented.shape) + "+ || Time Elapsed: " +
                    str(time.time() - t0))
    return complemented
def filter_np_rgb_to_cv_grayscale(np_image):
    """Convert an RGB numpy image to grayscale using OpenCV."""
    t0 = time.time()
    grayscale = cv.cvtColor(np.asarray(np_image), cv.COLOR_RGB2GRAY)
    log.print_debug("RGB converted to CV Gray scale || Shape: " +
                    str(grayscale.shape) + "+ || Time Elapsed: " +
                    str(time.time() - t0))
    return grayscale
def read_slide_crop(slide, x, y, crop_size):
    """Read a crop_size x crop_size region of *slide* at (x, y), level LVL."""
    t0 = time.time()
    region = slide.read_region((x, y), LVL, (crop_size, crop_size))
    log.print_debug("Converted slide to image || Shape: " + str(region.size) +
                    "+ || Time Elapsed: " + str(time.time() - t0))
    return region
# Example #8
 def run(self):
     """Crawl all racing results in parallel and write them to the output file."""
     self.__write_header()
     rc_datas = self.__get_rc_datas()
     print_debug('Get racing result [ %3d ]', len(rc_datas))
     # imap keeps results in input order while workers fetch concurrently.
     with Pool(self.pool_size) as pool:
         write_datas = pool.imap(Crawler.get_rc_result_helper, rc_datas)
         for write_data in write_datas:
             self.__write_file(write_data)
     self.file_handler.close()
def apply_mask(np_rgb_image, np_mask):
    """Apply a 2-D mask to an RGB image array, channel by channel.

    np_mask values are treated as weights in [0, 255]; the mask is
    normalized once and broadcast across the three channels instead of
    being divided three times as before.  Returns a float array with the
    same shape as np_rgb_image.
    """
    start_time = time.time()
    # Hoist the normalization out of the per-channel work and broadcast it.
    normalized_mask = np_mask / 255
    np_rgb_masked = np_rgb_image * normalized_mask[:, :, np.newaxis]
    elapsed_time = time.time() - start_time
    log.print_debug("Mask applied || Time Elapsed: " + str(elapsed_time))
    return np_rgb_masked
def resize_image_r(image, scale_factor):
    """Downscale *image* by *scale_factor* using Lanczos resampling."""
    t0 = time.time()
    w, h = image.size
    target = (int(w / scale_factor), int(h / scale_factor))
    resized = image.resize(target, Image.LANCZOS)
    # Note: logs the ORIGINAL size, exactly as before.
    log.print_debug("Image resized || Shape: " + str(image.size) +
                    "+ || Time Elapsed: " + str(time.time() - t0))
    return resized
def slide_to_image(slide):
    """Read the whole slide at level LVL into a PIL image (slow for big slides)."""
    t0 = time.time()
    width, height = get_slide_size(slide)
    # MAX width*height = 2**29!!!!!!! IMPORTANT
    whole = slide.read_region((0, 0), LVL, (width, height))
    log.print_debug("Converted slide to image || Shape: " + str(whole.size) +
                    "+ || Time Elapsed: " + str(time.time() - t0))
    return whole
def image_to_cv(image):
    """Convert a PIL RGB image into an OpenCV (BGR) array."""
    t0 = time.time()
    log.print_debug("Converting image to cv format. Requires time!")
    # OpenCV expects BGR channel order, so swap from PIL's RGB.
    bgr = cv.cvtColor(np.asarray(image), cv.COLOR_RGB2BGR)
    log.print_debug("Converted image to cv || Shape: " + str(image.size) +
                    "+ || Time Elapsed: " + str(time.time() - t0))
    return bgr
def open_slide(slide_path):
    """Open a whole-slide image; return None (and log an error) on failure."""
    t0 = time.time()
    slide = None
    try:
        slide = openslide.open_slide(slide_path)
    except Exception:
        pass
    elapsed = str(time.time() - t0)
    if slide is None:
        log.print_error("Cannot open slide " + slide_path +
                        " || Time Elapsed: " + elapsed)
    else:
        log.print_debug("Opened slide " + slide_path + " || Shape: " +
                        str(get_slide_size(slide)) + " || Time Elapsed: " +
                        elapsed)
    return slide
# Example #14
def list_np_to_pil_test(image_np, mode, x_max, y_max):
    """Re-assemble a batch of image tiles into one PIL image.

    image_np: 4-D array (tile_index, tile_height, tile_width, channels);
    tile i is placed at grid row i // x_max and grid column i % x_max on an
    x_max-by-y_max grid.  Assumes channels <= 3 and tile_index < x_max*y_max
    (same constraints the original per-pixel loop relied on).
    """
    start_time = time.time()

    tile_h, tile_w = image_np.shape[1], image_np.shape[2]
    channels = image_np.shape[3]
    array = np.zeros((y_max * tile_h, x_max * tile_w, 3))
    # One vectorized slice assignment per tile replaces the original
    # per-pixel triple loop; element placement is identical.
    for i in range(image_np.shape[0]):
        row = i // x_max
        col = i % x_max
        array[row * tile_h:(row + 1) * tile_h,
              col * tile_w:(col + 1) * tile_w,
              :channels] = image_np[i]

    image = Image.fromarray(np.asarray(np.clip(array, 0, 255), dtype="uint8"),
                            mode)
    elapsed_time = time.time() - start_time
    log.print_debug("Array converted to image || Time Elapsed: " +
                    str(elapsed_time))
    return image
def balance_set(x, y, in_set_patients):
    """Oversample minority classes with extra crops until all classes match the largest.

    Extra images are drawn from the 1120-px crop folder, excluding any patient
    already present in this set (to avoid patient leakage between splits).
    Returns the augmented (x, y); note that patient ids for added images are
    NOT tracked.
    """
    log.print_debug("Balancing dataset")
    cropped_dataset_folder = path.join(CROP_FOLDER, str(1120))
    x_list = x.tolist()
    unique, counts = np.unique(y, return_counts=True)
    max_class_size = max(counts)
    for i in range(0, unique.shape[0]):
        if counts[i] < max_class_size:
            # Candidate files whose name contains this class label.
            file_list = glob.glob(
                path.join(cropped_dataset_folder,
                          "*" + CATEGORIES[unique[i]] + "*"))
            cleaned_file_list = file_list.copy()
            # Drop files from patients already in the set (leakage guard).
            for filename in cleaned_file_list:
                img_patient = os.path.basename(filename).split("_")[0]
                if img_patient in in_set_patients:
                    cleaned_file_list.remove(filename)
            images_to_add = max_class_size - counts[i]
            for j in range(0, max_class_size - counts[i]):
                if len(cleaned_file_list) > 0:
                    # Sample without replacement from the remaining candidates.
                    filename = random.choice(cleaned_file_list)
                    cleaned_file_list.remove(filename)
                    img_class = unique[i]
                    img = Image.open(filename).convert('RGB')
                    img_array = np.asarray(img, np.uint8)
                    if filter.check_valid(img_array):
                        to_append_img = np.asarray(
                            img.resize((int(INPUT_SIZE), int(INPUT_SIZE)),
                                       Image.LANCZOS))
                        x_list.append(to_append_img)
                        y = np.append(y, img_class)
                        images_to_add = images_to_add - 1
                        #log.print_debug("Img " + filename + " added to set. " + str( images_to_add ) + " images to go.")
                    else:
                        log.print_warning("Img " + filename +
                                          " not loaded: too much white")
                        continue
                else:
                    log.print_warning("No more available images for class " +
                                      CATEGORIES[unique[i]])
                    break
    return np.asarray(x_list), y
def dataset_split(x, y, p, test_factor=0.5, random_state=None):
    """Split (x, y, p) into train/test sets without splitting any patient.

    Samples are shuffled by assigning one random sort key per patient, so all
    of a patient's samples stay adjacent.  The nominal split point is then
    moved to the nearest patient boundary (whichever direction is closer),
    after which both sides are class-balanced via balance_set.
    Returns X_train, X_test, y_train, y_test.
    """
    log.print_debug("Splitting dataset")
    dataset = []
    for i in range(0, len(y)):
        dataset.append([x[i], y[i], p[i]])
    random.seed(random_state)
    # One random key per patient id -> patient-level shuffle.
    r = {p_e: random.random() for x_e, y_e, p_e in dataset}
    dataset.sort(key=lambda item: r[item[2]])
    train_size = int(len(dataset) - int(len(dataset) * test_factor))
    # Walk outward from the nominal cut until the patient id changes.
    before_different = train_size - 2
    after_different = train_size
    while dataset[before_different][2] == dataset[train_size - 1][2]:
        before_different = before_different - 1
    while dataset[after_different][2] == dataset[train_size - 1][2]:
        after_different = after_different + 1
    # Cut at whichever patient boundary is closer to the nominal split.
    if train_size - before_different < after_different - train_size:
        X_train = np.asarray(dataset)[:before_different + 1, 0]
        y_train = np.asarray(dataset)[:before_different + 1, 1]
        X_test = np.asarray(dataset)[before_different + 1:, 0]
        y_test = np.asarray(dataset)[before_different + 1:, 1]
        in_train_patients = np.unique(
            np.asarray(dataset)[:before_different + 1, 2])
        in_test_patients = np.unique(
            np.asarray(dataset)[before_different + 1:, 2])
    else:
        X_train = np.asarray(dataset)[:after_different + 1, 0]
        y_train = np.asarray(dataset)[:after_different + 1, 1]
        X_test = np.asarray(dataset)[after_different + 1:, 0]
        y_test = np.asarray(dataset)[after_different + 1:, 1]
        in_train_patients = np.unique(
            np.asarray(dataset)[:after_different + 1, 2])
        in_test_patients = np.unique(
            np.asarray(dataset)[after_different + 1:, 2])

    log.print_info(" Dataset shape : " + str(X_train.shape) + " " +
                   str(y_train.shape) + str(X_test.shape) + " " +
                   str(y_test.shape))

    X_train, y_train = balance_set(X_train, y_train, in_train_patients)
    X_test, y_test = balance_set(X_test, y_test, in_test_patients)
    return X_train, X_test, y_train, y_test
# Example #17
def list_np_to_pil(image_np, mode):
    """Stitch a 5-D grid of tiles into one PIL image.

    image_np: (grid_rows, grid_cols, tile_height, tile_width, channels).
    Assumes channels <= 3, the same constraint the original per-element
    5-level loop relied on.
    """
    start_time = time.time()
    print(
        str(image_np.shape[0] * image_np.shape[2]) + " " +
        str(image_np.shape[1] * image_np.shape[3]))
    tile_h, tile_w = image_np.shape[2], image_np.shape[3]
    channels = image_np.shape[4]
    array = np.zeros((image_np.shape[0] * tile_h,
                      image_np.shape[1] * tile_w, 3))
    # One vectorized slice assignment per tile replaces the original
    # 5-level per-element loop; element placement is identical.
    for i in range(image_np.shape[0]):
        for j in range(image_np.shape[1]):
            array[i * tile_h:(i + 1) * tile_h,
                  j * tile_w:(j + 1) * tile_w,
                  :channels] = image_np[i][j]
    image = Image.fromarray(np.asarray(np.clip(array, 0, 255), dtype="uint8"),
                            mode)
    elapsed_time = time.time() - start_time
    log.print_debug("Array converted to image || Time Elapsed: " +
                    str(elapsed_time))
    return image
# Example #18
def produce_images():
    """Run make_prediction over every (drop rate, crop size, map, iterations) combination.

    Iterates maps map_1.svs .. map_5.svs, logging and garbage-collecting
    after each prediction.
    """
    crop_sizes = [2240, 4480]
    # Renamed from `iter`, which shadowed the builtin of the same name.
    iteration_counts = [1, 10]
    drop_rates = [0.01, 0.1, 0.5]
    blnd.print_multiple_gradient()
    for dr in drop_rates:
        for crop_size in crop_sizes:
            for i in range(1, 6):
                for iterations in iteration_counts:
                    start_time = time.time()
                    log.print_debug("PROCESSING map_" + str(i) + ".svs dr:" +
                                    str(dr) + " iter:" + str(iterations) +
                                    " Crop Size" + str(crop_size))
                    make_prediction(
                        [path.join(MAP_FOLDER, "map_" + str(i) + ".svs")],
                        crop_size, dr, iterations)
                    elapsed_time = time.time() - start_time
                    log.print_debug("map_" + str(i) + "_CS" + str(crop_size) +
                                    "_DR" + str(dr).replace("0.", "") +
                                    "_ITER" + str(iterations) + " processed" +
                                    ": " + str(elapsed_time))
                    # Free per-prediction memory before the next heavy run.
                    gc.collect()
# Example #19
    def get_rc_result_helper(info):
        """Fetch and parse one race's result page into merged table rows.

        info: a (date, RcNo) pair.  Returns the rows of the page's two
        result tables joined by horse number, or None when fewer than two
        tables are present.  Used as a Pool worker (see run).
        """
        date, RcNo = info
        # Log progress only once per date (on race number '1').
        if RcNo == '1':
            print_debug('Trying %s', date)
        res = requests.post(
            'http://race.kra.co.kr/raceScore/ScoretableDetailList.do', {
                'meet': '1',
                'realRcDate': date,
                'realRcNo': RcNo
            }).content.decode('euc-kr')

        soup = BeautifulSoup(res, 'html.parser')
        table = soup.find_all('div', class_='tableType2')
        if len(table) < 2:
            print_error('Failed to get table (%s, %s)', *info)
            return None
        table1 = table[0].table
        table2 = table[1].table
        # Rows keyed by horse number so the two tables can be joined.
        tmp = {}
        for horse1 in table1.find_all('tr')[1:]:
            datas = horse1.find_all('td')
            item = [date, RcNo]
            item += list(map(lambda x: x.text.strip(), datas))
            item += [
                Crawler.get_index(datas[2]),
                Crawler.get_index(datas[8]),
                Crawler.get_index(datas[9]),
                Crawler.get_index(datas[10])
            ]
            tmp[int(datas[1].text)] = item

        # Append the second table's columns to each horse's row.
        for horse2 in table2.find_all('tr')[1:]:
            datas = horse2.find_all('td')
            item = list(map(lambda x: x.text.strip(), datas[3:]))
            tmp[int(datas[1].text)] += item

        # NOTE(review): the sort key int(x[0][0]) reads the FIRST CHARACTER of
        # the date string, which is constant within a race -- confirm whether a
        # different column (e.g. finishing rank) was intended.
        return sorted(list(tmp.values()), key=lambda x: int(x[0][0]))
# Example #20
    def __get_rc_datas(self):
        """Collect every (race date, race number) pair between from_date and end_date.

        Pages through the score-list endpoint until an empty page is
        returned; returns the accumulated list of pairs.
        """
        print_debug('Get racing date & raceno (%s - %s)', self.from_date,
                    self.end_date)

        def get_rc_data_helper(index):
            # Fetch one listing page; None signals the end of pagination.
            res = requests.post(
                'http://race.kra.co.kr/raceScore/ScoretableScoreList.do', {
                    'nextFlag': 'true',
                    'fromDate': self.from_date,
                    'toDate': self.end_date,
                    'pageIndex': index
                },
                timeout=5).content.decode('euc-kr')
            soup = BeautifulSoup(res, 'html.parser')
            table = soup.find('div', class_='tableType2').table
            items = table.find_all('tr')[1:]

            result = []
            for item in items:
                data = item.find_all('td')[1:]
                if len(data) == 0:
                    return None
                # data[0]: date cell ("YYYY/MM/DD..."); data[1]: race links.
                result += [
                    (data[0].text.strip().split('\n')[0].replace('/', ''),
                     rc_no.text.split(' ')[0].strip())
                    for rc_no in data[1].find_all('a')
                ]
            return result

        result = []
        index = 1
        while True:
            item = get_rc_data_helper(index)
            if item is None:
                return result
            result += item
            index += 1
# Example #21
# Exercises all live stream operations.
# NOTE(review): this script fragment depends on `mux_python`, `os` and
# `logger` being imported earlier in the file.

# Authentication Setup
# Raises KeyError if the MUX_* environment variables are unset (intentional:
# the script cannot run without credentials).
configuration = mux_python.Configuration()
configuration.username = os.environ['MUX_TOKEN_ID']
configuration.password = os.environ['MUX_TOKEN_SECRET']

# API Client Initialization
live_api = mux_python.LiveStreamsApi(mux_python.ApiClient(configuration))
playback_ids_api = mux_python.PlaybackIDApi(mux_python.ApiClient(configuration))

# ========== create-live-stream ==========
new_asset_settings = mux_python.CreateAssetRequest(playback_policy=[mux_python.PlaybackPolicy.PUBLIC])
create_live_stream_request = mux_python.CreateLiveStreamRequest(playback_policy=[mux_python.PlaybackPolicy.PUBLIC], new_asset_settings=new_asset_settings)
create_live_stream_response = live_api.create_live_stream(create_live_stream_request)
logger.print_debug(str(create_live_stream_response))
# NOTE(review): `is not None` would be more idiomatic than `!= None` below.
assert create_live_stream_response != None
assert create_live_stream_response.data != None
assert create_live_stream_response.data.id != None
print("create-live-stream OK ✅")

# ========== list-live-streams ==========
list_live_streams_response = live_api.list_live_streams()
logger.print_debug(str(list_live_streams_response))
assert list_live_streams_response != None
assert list_live_streams_response.data != None
# Assumes the just-created stream is first in the listing.
assert list_live_streams_response.data[0].id == create_live_stream_response.data.id
print("list-live-streams OK ✅")

# ========== get-live-stream ==========
live_stream_response = live_api.get_live_stream(create_live_stream_response.data.id)
# Example #22
# NOTE(review): this fragment starts mid-script; `configuration` is created
# before this point (outside the visible chunk).
configuration.username = os.environ['MUX_TOKEN_ID']
configuration.password = os.environ['MUX_TOKEN_SECRET']

# API Client Initialization
uploads_api = mux_python.DirectUploadsApi(mux_python.ApiClient(configuration))

# ========== create-direct-upload ==========
create_asset_request = mux_python.CreateAssetRequest(
    playback_policy=[mux_python.PlaybackPolicy.PUBLIC])
create_upload_request = mux_python.CreateUploadRequest(
    timeout=3600,
    new_asset_settings=create_asset_request,
    cors_origin="philcluff.co.uk")
create_upload_response = uploads_api.create_direct_upload(
    create_upload_request)
logger.print_debug(str(create_upload_response))
assert create_upload_response != None
assert create_upload_response.data != None
assert create_upload_response.data.id != None
print("create-direct-upload OK ✅")

# ========== list-direct-uploads ==========
list_direct_uploads_response = uploads_api.list_direct_uploads()
logger.print_debug(str(list_direct_uploads_response))
assert list_direct_uploads_response != None
assert list_direct_uploads_response.data != []
# Assumes the just-created upload is first in the listing.
assert list_direct_uploads_response.data[
    0].id == create_upload_response.data.id
print("list-direct-uploads OK ✅")

# ========== get-direct-upload ==========
def _read_pickle(file_path):
    """Load one pickled object, closing the file via a context manager."""
    with open(file_path, "rb") as handle:
        return pickle.load(handle)


def _write_pickle(obj, file_path):
    """Pickle *obj* to *file_path*, closing the file via a context manager."""
    with open(file_path, "wb") as handle:
        pickle.dump(obj, handle)


def open_dataset():
    """Load (or build and cache) the dataset and its train/test split.

    Raw sets (X, y, p) are cached as pickles under SET_FOLDER and rebuilt
    via load_datasets on first use; the split is cached in a RANDOM_STATE
    subfolder and rebuilt via dataset_split.  Returns
    (X_train, y_train, X_test, y_test).

    Fix: all pickle file handles are now closed (read handles were
    previously opened and never closed).
    """
    x_path = path.join(SET_FOLDER, "X.pickle")
    y_path = path.join(SET_FOLDER, "y.pickle")
    p_path = path.join(SET_FOLDER, "p.pickle")

    if not os.path.isdir(SET_FOLDER):
        os.makedirs(SET_FOLDER)

    if os.path.isfile(x_path) and os.path.isfile(y_path) and os.path.isfile(
            p_path):
        log.print_debug("Opening saved sets in " + str(SET_FOLDER))
        X = _read_pickle(x_path)
        y = _read_pickle(y_path)
        p = _read_pickle(p_path)
    else:
        X, y, p = load_datasets(1344, 2240, 3136)
        log.print_debug("Saving and opening sets in " + str(SET_FOLDER))
        _write_pickle(X, x_path)
        _write_pickle(y, y_path)
        _write_pickle(p, p_path)

    log.print_info(" Dataset shape : " + str(len(X)) + " " + str(len(y)) +
                   " " + str(len(p)))

    split_folder = path.join(SET_FOLDER, str(RANDOM_STATE))
    if not os.path.isdir(split_folder):
        os.makedirs(split_folder)
    x_train_path = path.join(split_folder, "X_train.pickle")
    y_train_path = path.join(split_folder, "y_train.pickle")
    x_test_path = path.join(split_folder, "X_test.pickle")
    y_test_path = path.join(split_folder, "y_test.pickle")
    if os.path.isfile(x_train_path) and os.path.isfile(
            y_train_path) and os.path.isfile(x_test_path) and os.path.isfile(
                y_test_path):
        X_train = _read_pickle(x_train_path)
        y_train = _read_pickle(y_train_path)
        X_test = _read_pickle(x_test_path)
        y_test = _read_pickle(y_test_path)
    else:
        X_train, X_test, y_train, y_test = dataset_split(
            X, y, p, test_factor=TEST_SIZE, random_state=RANDOM_STATE)
        _write_pickle(X_train, x_train_path)
        _write_pickle(y_train, y_train_path)
        _write_pickle(X_test, x_test_path)
        _write_pickle(y_test, y_test_path)

    return X_train, y_train, X_test, y_test
# Example #24
# coding: utf-8

import os
import sys
import time
import mux_python
from mux_python.rest import NotFoundException
import logger

# Exercises all export operations.

# Authentication Setup
# Raises KeyError if the MUX_* environment variables are unset.
configuration = mux_python.Configuration()
configuration.username = os.environ['MUX_TOKEN_ID']
configuration.password = os.environ['MUX_TOKEN_SECRET']

# API Client Initialization
exports_api = mux_python.ExportsApi(mux_python.ApiClient(configuration))

# ========== list-exports ==========
list_exports_response = exports_api.list_exports()
logger.print_debug('Listed Exports:' + str(list_exports_response))
# NOTE(review): `is not None` would be more idiomatic than `!= None`.
assert list_exports_response != None
assert list_exports_response.data != None
print("list-exports OK ✅")
# Example #25
import sys
import time
import mux_python
from mux_python.rest import NotFoundException
import logger

# Exercises all export operations.
# NOTE(review): `os` is used below but not imported in this visible chunk --
# presumably imported earlier in the original file; verify.

# Authentication Setup
configuration = mux_python.Configuration()
configuration.username = os.environ['MUX_TOKEN_ID']
configuration.password = os.environ['MUX_TOKEN_SECRET']

# API Client Initialization
video_views_api = mux_python.VideoViewsApi(mux_python.ApiClient(configuration))

# ========== list-video-views ==========
list_video_views_response = video_views_api.list_video_views(filters=['country:US', 'browser:Safari'], timeframe=['7:days'])
logger.print_debug('List Video Views Response ' + str(list_video_views_response))
assert list_video_views_response != None
assert list_video_views_response.data != None
assert len(list_video_views_response.data) > 0
assert list_video_views_response.data[0] != None
print("list-video-views OK ✅")

# ========== get-video-view ==========
video_view_response = video_views_api.get_video_view(list_video_views_response.data[0].id)
assert video_view_response != None
assert video_view_response.data != None
print("get-video-view OK ✅")
# Example #26
import os
import sys
import time
import mux_python
from mux_python.rest import NotFoundException
import logger

# Exercises the Filters API (list filters and filter values).

# Authentication Setup
# Raises KeyError if the MUX_* environment variables are unset.
configuration = mux_python.Configuration()
configuration.username = os.environ['MUX_TOKEN_ID']
configuration.password = os.environ['MUX_TOKEN_SECRET']

# API Client Initialization
filters_api = mux_python.FiltersApi(mux_python.ApiClient(configuration))

# ========== list-filters ==========
list_filters_response = filters_api.list_filters()
logger.print_debug('List Filters Response: ' + str(list_filters_response))
assert list_filters_response != None
assert list_filters_response.data != None
assert list_filters_response.data.basic != None
assert list_filters_response.data.advanced != None
print("list-filters OK ✅")

# ========== list-filter-values ==========
list_filter_values_response = filters_api.list_filter_values('browser', timeframe=['7:days'])
logger.print_debug('List Filters Values Response: ' + str(list_filter_values_response))
assert list_filter_values_response != None
assert list_filter_values_response.data != None
print("list-filter-values OK ✅")
def normalize_filter(np_image):
    """Scale *np_image* in place so its maximum value becomes 255.

    NOTE: mutates AND returns the input array; requires a float dtype
    (in-place multiply by a float fails on integer arrays) and a non-zero
    maximum -- TODO confirm callers guarantee both.
    """
    start_time = time.time()
    np_image *= 255.0 / np_image.max()
    elapsed_time = time.time() - start_time
    # Fixed log message: it previously said "Otsu filter scikit", a
    # copy-paste from another filter; this function only normalizes.
    log.print_debug("Image normalized || Time Elapsed: " + str(elapsed_time))
    return np_image
from mux_python.rest import NotFoundException
import logger

# Exercises all url signing key operations.
# NOTE(review): `mux_python` and `os` are used below but imported outside
# this visible chunk.

# Authentication Setup
configuration = mux_python.Configuration()
configuration.username = os.environ['MUX_TOKEN_ID']
configuration.password = os.environ['MUX_TOKEN_SECRET']

# API Client Initialization
keys_api = mux_python.URLSigningKeysApi(mux_python.ApiClient(configuration))

# ========== create-url-signing-key ==========
create_key_response = keys_api.create_url_signing_key()
# NOTE(review): other fragments wrap responses in str() before print_debug;
# confirm print_debug accepts non-string arguments.
logger.print_debug(create_key_response)
assert create_key_response != None
assert create_key_response.data.id != None
assert create_key_response.data.private_key != None
print("create-url-signing-key OK ✅")

# ========== list-url-signing-keys ==========
list_keys_response = keys_api.list_url_signing_keys()
logger.print_debug(list_keys_response)
assert list_keys_response != None
assert list_keys_response.data[0].id != None
assert list_keys_response.data[0].id == create_key_response.data.id
# Private keys must never be returned by the list endpoint.
assert list_keys_response.data[0].private_key == None
print("list-url-signing-keys OK ✅")

# ========== get-url-signing-key ==========
def resize_image_a(image, width, height):
    """Resize *image* to exactly (width, height) with Lanczos resampling."""
    t0 = time.time()
    resized = image.resize((int(width), int(height)), Image.LANCZOS)
    log.print_debug("Image resized || Time Elapsed: " + str(time.time() - t0))
    return resized
def overlap_crop_multithread(dataset_folder, slide_name_ex, custom_ss):
    """Crop a slide into overlapping custom_ss x custom_ss tiles, one thread per crop.

    Windows are placed so they exactly cover the slide: the per-window
    overlap absorbs the remainder when width/height is not a multiple of
    custom_ss, and the last row/column is anchored to the slide's far edge.

    Fix: the final log previously reported crop_number + 1 although
    crop_number is incremented before each crop and already equals the
    number of crops produced.
    """
    start_time = time.time()
    pool = []
    x_p = []
    y_p = []
    # Folder creation (if it does not exist)
    algorithm_crop_folder = "overlap_no_padding"
    make_crop_folder(algorithm_crop_folder, custom_ss)

    # Slide opening
    slide_path = path.join(dataset_folder, slide_name_ex)
    slide_name = slide_name_ex.split(".")[0]
    slide = open_slide(slide_path)
    image = slide_to_image(slide)
    width, height = get_slide_size(slide)
    # Number of windows needed to cover each dimension
    w_windows = int(math.ceil(width / custom_ss))
    h_windows = int(math.ceil(height / custom_ss))
    # Overlap of a single window (zero when one window suffices)
    if (w_windows - 1) != 0:
        w_overlap = math.floor(
            ((custom_ss * w_windows) - width) / (w_windows - 1))
    else:
        w_overlap = 0
    if (h_windows - 1) != 0:
        h_overlap = math.floor(
            ((custom_ss * h_windows) - height) / (h_windows - 1))
    else:
        h_overlap = 0
    # Window origins; the last one is clamped to the far edge.
    x = 0
    for j in range(0, w_windows - 1):
        x_p.append(x)
        x += custom_ss - w_overlap
    y = 0
    for i in range(0, h_windows - 1):
        y_p.append(y)
        y += custom_ss - h_overlap
    x_p.append(width - custom_ss)
    y_p.append(height - custom_ss)

    # Cropping: one thread per tile.
    # NOTE(review): spawns len(x_p) * len(y_p) threads at once with no cap;
    # consider a bounded pool for very large slides.
    crop_number = 0
    for i in range(0, len(x_p)):
        for j in range(0, len(y_p)):
            crop_number += 1
            box = (x_p[i], y_p[j], x_p[i] + custom_ss, y_p[j] + custom_ss)
            crop_folder = path.join(CROP_FOLDER, algorithm_crop_folder,
                                    str(custom_ss))
            crop_name = slide_name + '_' + str(i) + 'x' + str(j) + ".png"
            pool.append(
                Thread(target=custom_crop,
                       args=(
                           image,
                           box,
                           crop_folder,
                           crop_name,
                       )))
            pool[-1].start()
    for p in pool:
        p.join()
    elapsed_time = time.time() - start_time
    log.print_debug(
        str(crop_number) + " crops produced || Time Elapsed: " +
        str(elapsed_time))