Example #1
def process_stats_of_single_patch_batch(
    full_result,
    img_height,
    img_width,
    patch_dim,
    patch_shave,
    scale,
    model,
    device,
    date,
):
    """
    Plots and saves, as csv, the data from the single-patch every-possible-batch-size experiment

    Parameters
    ----------
    full_result : pandas DataFrame
        experiment results.
    img_height : int
        image height.
    img_width : int
        image width.
    patch_dim : int
        patch dimension.
    patch_shave : int
        patch shave.
    scale : int
        scale value for LR to SR.
    model : str
        SR model name (e.g. EDSR).
    device : str
        GPU or CPU.
    date : str
        timestamp used in the saved file names.

    Returns
    -------
    None.

    """

    model_name = model
    device_name = "CPU"
    total_memory = "~"
    if device == "cuda":
        _, device_name = ut.get_device_details()
        total_memory, _, _ = ut.get_gpu_details(device,
                                                "\nDevice info:",
                                                logger=None,
                                                print_details=False)
        plt_title = "Model: {} | GPU: {} | Memory: {} MB".format(
            model_name, device_name, total_memory)
    else:
        plt_title = "Model: {} | Device: {}".format(model_name, "CPU")

    # Batch size (column 0) is the x-axis for every plot
    x_data = np.array(full_result.iloc[:, 0].values).flatten()
    x_label = "Batch size (Image: {}x{}, Patch: {}x{}, Shave: {}, Scale: {})".format(
        img_height, img_width, patch_dim, patch_dim, patch_shave, scale)

    # (column index, y-axis label, output file name) for each stat to plot
    plot_specs = [
        (9, "Total processing time (sec): LR -> SR",
         "total_processing_time"),
        (1, "Total patch list creation time (sec): LR -> SR",
         "total_patch_list_creation_time"),
        (3, "Total CPU to GPU shifting time (sec): LR -> SR",
         "total_CPU_2_GPU_time"),
        (2, "Total EDSR processing time (sec): LR -> SR",
         "total_edsr_processing_time"),
        (8, "Total batch processing time (sec): LR -> SR",
         "total_batch_processing_time"),
        (4, "Total GPU to CPU shifting time (sec): LR -> SR",
         "total_GPU_2_CPU_time"),
    ]
    for column, y_label, file_name in plot_specs:
        y_data = np.array(full_result.iloc[:, column].values).flatten()
        plot_stat(x_data, y_data, x_label, y_label, plt_title, file_name, date)
Example #2
def batch_range_checker(
    max_dim,
    min_dim,
    patch_shave,
    scale,
    img_path,
    logger,
    dim_gap=1,
    batch_start=1,
    model_name="EDSR",
    device="cuda",
    temp_file_path=None,
):
    """
    Checks maximum valid batch size for every patch dimension from min_dim to max_dim

    Parameters
    ----------
    max_dim : int
        biggest patch dimension to test.
    min_dim : int
        smallest patch dimension to test.
    patch_shave : int
        patch overlapping value.
    scale : int
        scaling value for LR to SR.
    img_path : str
        image path.
    logger : logger object
        keeps log.
    dim_gap : int, optional
        gap between two consecutive patch dimensions. The default is 1.
    batch_start : int, optional
        smallest batch value to start from. The default is 1.
    model_name : str, optional
        SR model name. The default is "EDSR".
    device : str, optional
        GPU or CPU. The default is "cuda".
    temp_file_path : str, optional
        csv file path for saving per-dimension stats. The default is None.

    Raises
    ------
    Exception
        batch size mismatch exception.

    Returns
    -------
    full_result : list of list
        stats of the experiment.

    """

    # Banner for the process
    banner = pyfiglet.figlet_format("Batch Experiment: " + model_name)
    print(banner)

    # Temporary file for saving stats of every batch size
    temp_file = open(temp_file_path, "a")

    # For saving all results
    full_result = []

    # to show memory usages
    used_memory = 0
    last_used_memory = 0

    # tqdm range
    tqdm_range = trange(max_dim, min_dim - 1, -dim_gap)

    for patch_dim in tqdm_range:

        # Show memory status in the tqdm bar
        _, used_memory, _ = ut.get_gpu_details(device,
                                               None,
                                               logger,
                                               print_details=False)
        leaked_memory = (used_memory - last_used_memory
                         if used_memory > last_used_memory else 0)
        tqdm_range.set_description(
            "Patch Dim: {:04}x{:04} | Start Batch Size: {:04} "
            "| Used Memory: {:09.3f} | Leaked Memory: {:09.3f}".format(
                patch_dim, patch_dim, batch_start, used_memory, leaked_memory))
        last_used_memory = used_memory

        # If the model couldn't process the maximum dimension even in a batch
        # of size 1, stop
        if patch_dim < max_dim and batch_start == 0:
            raise Exception(
                "Batch execution error. Highest patch dimension couldn't be "
                "processed in a batch of size 1.")

        # Result of current patch
        result = [patch_dim]
        error_type = [0]
        time_stats = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

        # Flag for jumping batch sizes when we can predict
        jump = 0

        # Start predicting when we have at least three samples
        if len(full_result) >= 3:

            # Take last three stat rows to predict
            last_three_results = full_result[-3:]

            # Prediction condition
            if (last_three_results[2][1] - last_three_results[1][1] >= 2 and
                    last_three_results[1][1] - last_three_results[0][1] >= 2):
                predicted_batch = int(
                    bp.predict3(
                        tuple(last_three_results[0][0:2]),
                        tuple(last_three_results[1][0:2]),
                        tuple(last_three_results[2][0:2]),
                    ))
                batch_start = predicted_batch
                # Jumping batches: Flag ON
                jump = 1

        # Call subprocess for each batch size for the current patch dimension
        while True:

            # Subprocess
            command = ("python3 " + "helper_batch_patch_forward_chop.py " +
                       " --img_path=" + img_path + " --dimension=" +
                       str(patch_dim) + " --shave=" + str(patch_shave) +
                       " --batch_size=" + str(batch_start) + " --scale=" +
                       str(scale) + " --print_result=" + str(0) +
                       " --device=" + device + " --model_name=" + model_name)
            p = subprocess.run(command, shell=True, capture_output=True)

            # Valid batch size
            if p.returncode == 0:

                # Get the results of the current batch size
                time_stats = list(
                    map(float,
                        p.stdout.decode().split("\n")[0:10]))

                # Logging the current valid batch size for the given patch dimension
                logger.info("Patch Dimension {} - Batch Size {}...".format(
                    patch_dim, batch_start))

                # Increase batch size for the next attempt
                batch_start += 1

                # The batch size was a predicted batch size but wasn't valid
                # so the flag turned to -1 and now the reduced batch size is valid
                # so break
                if jump == -1:
                    # Reducing batch size to start from the last valid batch
                    # size for the next patch dimension
                    batch_start -= 1
                    break

                # The batch size was a predicted valid batch size
                elif jump == 1:
                    # Resetting the flag
                    jump = 0

            # Invalid batch size
            else:

                # Batch size greater than total number of patches
                if p.returncode == 2:
                    error_type[0] = 1
                    logger.error(
                        "\tDimension: {}, Batch size: {}. Batch size larger "
                        "than total number of patches".format(
                            patch_dim, batch_start))

                # CUDA memory error
                elif p.returncode == 1:
                    pass

                # Reducing batch size for this or the next iteration, depending on the jump value
                batch_start -= 1

                # It wasn't a predicted batch size so go to next iteration
                if jump == 0:
                    break

                # It was a predicted batch size, so don't break; change the jump flag
                else:
                    # Stay like this till we get a valid batch size while reducing
                    jump = -1

        # Saving result for the last executed patch dimension
        result += [batch_start]
        result += error_type
        result += time_stats

        # Appending to full result
        full_result.append(result)

        # Saving each stat outcome for every patch dimension
        pd.DataFrame([result]).to_csv(temp_file, header=False, index=False)

        # Saving checkpoint to resume
        loader_config = toml.load("../loader_config.toml")
        loader_config["batch_range_experiment"]["last_patch_dim"] = patch_dim
        loader_config["batch_range_experiment"]["last_valid_batch"] = batch_start
        with open("../loader_config.toml", "w") as loader_config_file:
            toml.dump(loader_config, loader_config_file)

    # Closing temporary csv file of stats
    temp_file.close()

    # Changing current process status
    loader_config = toml.load("../loader_config.toml")
    loader_config["batch_range_experiment"]["status"] = "finished"
    with open("../loader_config.toml", "w") as loader_config_file:
        toml.dump(loader_config, loader_config_file)

    # Returning result
    return full_result
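bp.predict3 is not shown in these examples. A plausible sketch, assuming it fits a straight line through the last three (patch_dim, max_batch) points and extrapolates one dimension step ahead (the fixed-step assumption is hypothetical):

import numpy as np


def predict3(p0, p1, p2):
    # Least-squares line through three (dim, batch) points, evaluated one
    # dimension step past the last point
    dims = np.array([p0[0], p1[0], p2[0]], dtype=float)
    batches = np.array([p0[1], p1[1], p2[1]], dtype=float)
    slope, intercept = np.polyfit(dims, batches, 1)
    next_dim = p2[0] + (p2[0] - p1[0])
    return slope * next_dim + intercept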
Example #3
def single_patch_highest_batch_checker(
    patch_dim,
    patch_shave,
    scale,
    img_path,
    logger,
    run=3,
    batch_size_start=1,
    model_name="EDSR",
    device="cuda",
):
    """
    Calculates timing for every possible batch size for a specific patch dimension and shave value

    Parameters
    ----------
    patch_dim : int
        dimension of patch.
    patch_shave : int
        shave value of patch.
    scale : int
        scale for LR to SR.
    img_path : str
        image path.
    logger : logger object
        keeps log.
    run : int, optional
        total number of runs for averaging values. The default is 3.
    batch_size_start : int, optional
        starting batch size. The default is 1.
    model_name : str, optional
        SR model name. The default is "EDSR".
    device : str, optional
        GPU or CPU. The default is 'cuda'.

    Returns
    -------
    full_result : list of list
        stats of the experiment.

    """

    # Exception flag to stop the process
    exception = False

    # Container for saving the stats
    full_result = []

    print("Processing...\n")
    while not exception:

        # Result of one batch size
        result = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        print("\nBatch size: {}\n".format(batch_size_start))

        # Run the given number of runs for the current batch size
        for r in tqdm(range(run)):
            temp = [batch_size_start]

            # Subprocess
            command = ("python3 " + "helper_batch_patch_forward_chop.py " +
                       " --img_path=" + img_path + " --dimension=" +
                       str(patch_dim) + " --shave=" + str(patch_shave) +
                       " --batch_size=" + str(batch_size_start) + " --scale=" +
                       str(scale) + " --print_result=" + str(0) +
                       " --device=" + device + " --model_name=" + model_name)
            p = subprocess.run(command, shell=True, capture_output=True)

            # Valid batch size
            if p.returncode == 0:
                temp += list(map(float, p.stdout.decode().split("\n")[1:10]))
                result = [result[i] + temp[i] for i in range(len(temp))]

            # Invalid batch size
            else:

                # Batch size greater than total number of patches
                if p.returncode == 2:
                    logger.error(
                        "\tDimension: {}, Batch size: {}. Batch size larger "
                        "than total number of patches".format(
                            patch_dim, batch_size_start))

                # CUDA memory error
                elif p.returncode == 1:
                    logger.error(
                        "\tDimension: {}, Batch size : {}. CUDA out of memory".
                        format(patch_dim, batch_size_start))
                # Logging error
                logger.info("Error: Dimension - {}, Batch size - {}".format(
                    patch_dim, batch_size_start))

                # Logging gpu stats after batch error
                ut.get_gpu_details(device,
                                   state="GPU stat after batch size error:",
                                   logger=logger)

                # Raise flag as the batch size is not valid
                exception = True
                break

        # Average the result if there was no exception for the given batch size
        if not exception:
            result = np.array(result) / run
            full_result.append(result)
            batch_size_start += 1

    # Return full result
    print("Process finished!\n")
    return full_result
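These examples drive helper_batch_patch_forward_chop.py through an exit-code protocol: 0 means success (timings on stdout, one value per line), 2 means the batch size exceeds the number of patches, and 1 means a CUDA out-of-memory error. A sketch of the helper's side of that contract (hypothetical; the helper script itself is not shown):

import sys


def report_and_exit(time_stats, total_patches, batch_size):
    # Exit code 2: batch larger than the total number of patches
    if batch_size > total_patches:
        sys.exit(2)
    # Success: one timing value per line, parsed by the caller with
    # list(map(float, p.stdout.decode().split("\n")[...]))
    for value in time_stats:
        print(value)
    sys.exit(0)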
Example #4

def maximum_acceptable_dimension(device,
                                 logger,
                                 model,
                                 max_unacceptable_dimension,
                                 model_name="EDSR"):
    """
    Get maximum acceptable dimension

    Parameters
    ----------
    device : str
        device type.
    logger : logger object
        keeps log.
    model : torch.nn.model
        SR model.
    max_unacceptable_dimension : int
        maximum unacceptable dimension, which is a power of 2.
    model_name : str, optional
        SR model name. The default is "EDSR".

    Returns
    -------
    last : int
        acceptable dimension.

    """
    print("\nGetting maximum acceptable dimension...\n")
    result2 = {}
    dimension = max_unacceptable_dimension
    maxm = math.inf
    minm = -math.inf
    last = 0
    last_used_memory = 0
    iteration = 0
    while True:
        # Printing iterations status
        iteration += 1
        _, used_memory, _ = ut.get_gpu_details(device,
                                               None,
                                               logger,
                                               print_details=False)
        leaked_memory = (used_memory - last_used_memory
                         if used_memory > last_used_memory else 0)
        print(
            "Patch Dimension: {:04}x{:04} | Used Memory: {:09.3f} | Leaked Memory: {:09.3f} | Iteration: {}"
            .format(dimension, dimension, used_memory, leaked_memory,
                    iteration))
        last_used_memory = used_memory

        # Clearing cuda cache:
        ut.clear_cuda(None, None)

        # Binary Search
        if last == dimension:
            break
        process_output = subprocess.run(
            ["python3", "binarysearch_helper.py",
             str(dimension), model_name],
            stdout=subprocess.PIPE,
            text=True,
        )
        if process_output.returncode == 0:
            out = process_output.stdout.split("\n")
            total_time = out[0]
            last = dimension
            if dimension in result2.keys():
                result2[dimension].append(total_time)
            else:
                result2[dimension] = [total_time]
            minm = copy.copy(dimension)
            if maxm == math.inf:
                dimension *= 2
            else:
                dimension = dimension + (maxm - minm) // 2
            ut.clear_cuda(None, None)
        else:
            ut.get_gpu_details(
                device,
                "Runtime error for dimension: {}x{}".format(
                    dimension, dimension),
                logger,
            )
            maxm = copy.copy(dimension)
            if dimension in result2.keys():
                result2[dimension].append(math.inf)
            else:
                result2[dimension] = [math.inf]
            if minm == -math.inf:
                dimension = dimension // 2
            else:
                dimension = minm + (maxm - minm) // 2
            ut.clear_cuda(None, None)
    return last
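The loop above is a binary search over feasible patch dimensions: minm tracks the largest dimension that ran, maxm the smallest that failed, and the search stops once the midpoint stops moving. The same pattern in isolation, with a toy feasibility test standing in for the SR subprocess (a sketch):

import math


def max_feasible(fits, max_unacceptable_dimension):
    # Largest n with fits(n) True, starting from a known-failing upper point
    maxm, minm = math.inf, -math.inf
    last, dimension = 0, max_unacceptable_dimension
    while last != dimension:
        if fits(dimension):
            last = dimension
            minm = dimension
            dimension = (dimension * 2 if maxm == math.inf
                         else dimension + (maxm - minm) // 2)
        else:
            maxm = dimension
            dimension = (dimension // 2 if minm == -math.inf
                         else minm + (maxm - minm) // 2)
    return last


print(max_feasible(lambda n: n <= 1000, 1024))  # -> 1000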
Example #5
        actual_file_path = folder_name + "/" + file_name
        os.rename(temp_file_path, actual_file_path)

        # Saving gpu stat for the finished process
        meta_data_path = folder_name + "/" + "gpustat.json"
        gpu_command = "gpustat --json > " + meta_data_path
        subprocess.run(gpu_command, shell=True)

        # Meta information to show in the plot
        device_name = "CPU"
        total_memory = "~"
        if device == "cuda":
            _, device_name = ut.get_device_details()
            total_memory, _, _ = ut.get_gpu_details(device,
                                                    "\nDevice info:",
                                                    logger=None,
                                                    print_details=False)

        # Loading result from csv to plot
        full_result = pd.read_csv(actual_file_path)

        # Plotting result message
        print("\nPlotting result...\n")

        # Creating plot title
        if device == "cuda":
            plt_title = "Model: {} | GPU: {} | Memory: {} MB".format(
                model_name, device_name, total_memory)
        else:
            plt_title = "Model: {} | Device: {}".format(model_name, "CPU")
Example #6

def maximum_unacceptable_dimension_2n(device,
                                      logger,
                                      start_dim=2,
                                      model_name="EDSR"):
    """
    Get the maximum unacceptable dimension which is a power of 2

    Parameters
    ----------
    device : str
        device type.
    logger : logger object
        keeps log.
    start_dim : int, optional
        starting dimension for the doubling search. The default is 2.
    model_name : str, optional
        SR model name. The default is "EDSR".

    Returns
    -------
    last_dimension : int
        unacceptable dimension.

    """
    print(
        "\nGetting maximum unacceptable dimension which is a power of two...\n"
    )
    result1 = {}
    last_dimension = 0
    dimension = start_dim
    last_used_memory = 0
    iteration = 0
    while True:
        # Printing loop status
        iteration += 1
        _, used_memory, _ = ut.get_gpu_details(device,
                                               None,
                                               logger,
                                               print_details=False)
        leaked_memory = (used_memory - last_used_memory
                         if used_memory > last_used_memory else 0)
        print(
            "Patch Dimension: {:04}x{:04} | Used Memory: {:09.3f} | Leaked Memory: {:09.3f} | Iteration: {}"
            .format(dimension, dimension, used_memory, leaked_memory,
                    iteration))
        last_used_memory = used_memory

        # Calling SR model for different dimension
        process_output = subprocess.run(
            ["python3", "binarysearch_helper.py",
             str(dimension), model_name],
            stdout=subprocess.PIPE,
            text=True,
        )
        if process_output.returncode == 0:
            out = process_output.stdout.split("\n")
            total_time = out[0]
            if dimension in result1.keys():
                result1[dimension].append(total_time)
            else:
                result1[dimension] = [total_time]
            dimension *= 2
        else:
            ut.get_gpu_details(
                device,
                "Runtime error for dimension: {}x{}".format(
                    dimension, dimension),
                logger,
            )
            if dimension in result1.keys():
                result1[dimension].append(math.inf)
            else:
                result1[dimension] = [math.inf]

            last_dimension = dimension

            ut.clear_cuda(None, None)
            break
    return last_dimension
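Stripped of the GPU bookkeeping, this is a doubling (exponential) search for the first failing power of two; paired with the binary search of Example #4, it brackets the maximum acceptable dimension. A self-contained sketch:

def first_failing_power_of_two(fits, start_dim=2):
    # Double the dimension until the feasibility test fails
    dimension = start_dim
    while fits(dimension):
        dimension *= 2
    return dimension


print(first_failing_power_of_two(lambda n: n <= 1000))  # -> 1024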
Example #7

def do_binary_search(model_name, start_dim):
    """
    Runs the doubling search followed by the binary search to find the
    maximum acceptable patch dimension and saves it to the config files

    Parameters
    ----------
    model_name : str
        SR model name.
    start_dim : int
        starting dimension for the doubling search.

    Returns
    -------
    None.

    """
    # Prints the header banner
    banner = pyfiglet.figlet_format("Binary Search: " + model_name)
    print(banner)

    # Getting logger
    logger = ut.get_logger()

    # Check valid model or not
    if model_name not in ["EDSR", "RRDB"]:
        logger.exception("{} model is unkknown".format(model_name))
        raise Exception("Unknown model...")

    # Device type cpu or cuda
    device = ut.get_device_type()

    if device == "cpu" and model_name not in ["EDSR"]:
        logger.exception("{} model cannot be run in CPU".format(model_name))
        raise Exception("{} model cannot be run in CPU".format(model_name))

    # Device information
    _, device_name = ut.get_device_details()

    if device == "cuda":
        logger.info("Device: {}, Device Name: {}".format(device, device_name))
        ut.get_gpu_details(
            device,
            "Before binary search: {}".format(model_name),
            logger,
            print_details=True,
        )
    else:
        logger.info("Device: {}, Device Name: {}".format(device, device_name))

    # Clearing cuda cache
    ut.clear_cuda(None, None)

    # Getting the highest unacceptable dimension which is a power of 2
    max_unacceptable_dimension = maximum_unacceptable_dimension_2n(
        device, logger, start_dim=start_dim, model_name=model_name)
    print("\nMaximum unacceptable dimension: {}\n".format(
        max_unacceptable_dimension))

    # Clearing cuda cache
    ut.clear_cuda(None, None)

    # Getting the maximum acceptable dimension
    max_dim = maximum_acceptable_dimension(device,
                                           logger,
                                           None,
                                           max_unacceptable_dimension,
                                           model_name=model_name)
    print("\nMaximum acceptable dimension: {}\n".format(max_dim))

    # Clearing cuda cache
    ut.clear_cuda(None, None)

    # For batch processing
    config = toml.load("../batch_processing.toml")
    config["end_patch_dimension"] = max_dim
    with open("../batch_processing.toml", "w") as f:
        toml.dump(config, f)

    # For linear search
    config = toml.load("../config.toml")
    config["max_dim"] = max_dim
    with open("../config.toml", "w") as f:
        toml.dump(config, f)
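The same toml read-modify-write appears in Examples #2 and #7; factoring it into a small helper keeps the checkpointing in one place (a sketch; the paths and keys follow the code above):

import toml


def update_config(path, key, value):
    # Load the file, patch one top-level key, and write it back
    config = toml.load(path)
    config[key] = value
    with open(path, "w") as f:
        toml.dump(config, f)


# e.g. update_config("../config.toml", "max_dim", max_dim)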
Example #8
def do_linear_search(test=False, test_dim=32):
    """
    Times every patch dimension from 1 to the maximum acceptable dimension
    with the EDSR model, then plots and saves the per-dimension stats

    Parameters
    ----------
    test : bool, optional
        run only a single test dimension. The default is False.
    test_dim : int, optional
        dimension to use when test is True. The default is 32.

    Returns
    -------
    None.

    """
    logger = ut.get_logger()

    device = "cuda"
    model_name = "EDSR"
    config = toml.load("../config.toml")
    run = config["run"]
    scale = int(config["scale"]) if config["scale"] else 4
    # device information
    _, device_name = ut.get_device_details()
    total, _, _ = ut.get_gpu_details(
        device, "\nDevice info:", logger, print_details=False
    )
    log_message = (
        "\nDevice: "
        + device
        + "\tDevice name: "
        + device_name
        + "\tTotal memory: "
        + str(total)
    )
    logger.info(log_message)

    ut.clear_cuda(None, None)

    state = "Before loading model: "
    total, used, _ = ut.get_gpu_details(device, state, logger, print_details=True)

    model = md.load_edsr(device=device)

    state = "After loading model: "
    total, used, _ = ut.get_gpu_details(device, state, logger, print_details=True)

    config = toml.load("../config.toml")
    max_dim = int(config["max_dim"])
    if not test:
        detailed_result, memory_used, memory_free = result_from_dimension_range(
            device, logger, config, model, 1, max_dim
        )
    else:
        detailed_result, memory_used, memory_free = result_from_dimension_range(
            device, logger, config, model, test_dim, test_dim
        )
    if not test:
        # get mean
        # get std
        mean_time, std_time = ut.get_mean_std(detailed_result)
        mean_memory_used, std_memory_used = ut.get_mean_std(memory_used)
        mean_memory_free, std_memory_free = ut.get_mean_std(memory_free)

        # make folder for saving results
        plt_title = "Model: {} | GPU: {} | Memory: {} MB".format(
            model_name, device_name, total
        )
        date = "_".join(str(time.ctime()).split())
        date = "_".join(date.split(":"))
        foldername = date
        os.mkdir("results/" + foldername)
        # plot data
        ut.plot_data(
            foldername,
            "dimension_vs_meantime",
            mean_time,
            "Dimensionn of Patch(nxn)",
            "Mean Processing Time: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="mean time",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_stdtime",
            std_time,
            "Dimension n of Patch(nxn)",
            "Std of Processing Time: LR -> SR, Scale: {} ( {} runs )".format(
                scale, run
            ),
            mode="std time",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_meanmemoryused",
            mean_memory_used,
            "Dimension n of Patch(nxn)",
            "Mean Memory used: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="mean memory used",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_stdmemoryused",
            std_memory_used,
            "Dimension n of Patch(nxn)",
            "Std Memory Used: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="std memory used",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_meanmemoryfree",
            mean_memory_free,
            "Dimension n of Patch(nxn)",
            "Mean Memory Free: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="mean memory free",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_stdmemoryfree",
            std_memory_free,
            "Dimension n of Patch(nxn)",
            "Std Memory Free: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="std memory free",
            title=plt_title,
        )
        # save data
        ut.save_csv(
            foldername,
            "total_stat",
            device,
            device_name,
            total,
            mean_time,
            std_time,
            mean_memory_used,
            std_memory_used,
            mean_memory_free,
            std_memory_free,
        )
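ut.get_mean_std is not shown either. A plausible sketch, assuming it reduces the {dimension: [value per run]} dictionaries built by result_from_dimension_range (Example #9) to per-dimension mean and standard deviation:

import numpy as np


def get_mean_std(detailed_result):
    # One mean and one std per dimension, averaged over the runs
    mean = {dim: float(np.mean(vals)) for dim, vals in detailed_result.items()}
    std = {dim: float(np.std(vals)) for dim, vals in detailed_result.items()}
    return mean, std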
Example #9
def result_from_dimension_range(device, logger, config, model, first, last):
    """
    Get detailed result for every dimension from 1 to the last acceptable dimension

    Parameters
    ----------
    device : str
        device type.
    logger : logger object
        keeps log.
    config : dict
        loaded toml config; config["run"] gives the number of runs to average.
    model : torch.nn.model
        SR model.
    first : int
        starting dimension.
    last : int
        last acceptable dimension.

    Returns
    -------
    result3 : dictionary
        time for every dimension.
    memory_used : dictionary
        memory used per dimension.
    memory_free : dictionary
        memory free per dimension.

    """
    run = config["run"]
    print("\nPreparing detailed data... ")
    result3 = {}
    memory_used = {}
    memory_free = {}
    for i in range(run):
        print("\nRun: ", i + 1)
        print()
        for dim in tqdm(range(first, last + 1)):
            dimension = dim
            input_image = ut.random_image(dimension)
            input_image = input_image.to(device)
            with torch.no_grad():
                try:
                    print("\n")
                    print(input_image.shape)
                    print(input_image[0, 0, 0, 0:5])
                    start = time.time()
                    output_image = model(input_image)
                    end = time.time()
                    total_time = end - start
                    print("Processing time: ", total_time)
                    print("\n")
                    if dimension in result3.keys():
                        result3[dimension].append(total_time)
                        _, used, free = ut.get_gpu_details(
                            device, "", None, print_details=False
                        )
                        memory_used[dimension].append(used)
                        memory_free[dimension].append(free)
                    else:
                        result3[dimension] = [total_time]
                        _, used, free = ut.get_gpu_details(
                            device, "", None, print_details=False
                        )
                        memory_used[dimension] = [used]
                        memory_free[dimension] = [free]
                    ut.clear_cuda(input_image, output_image)
                except RuntimeError as err:
                    logger.exception("\nDimension NOT OK!")

                    state = "\nGPU usage after dimension exception...\n"
                    ut.get_gpu_details(device, state, logger, print_details=True)

                    output_image = None
                    ut.clear_cuda(input_image, output_image)

                    state = f"\nGPU usage after clearing the image {dimension}x{dimension}...\n"
                    ut.get_gpu_details(device, state, logger, print_details=True)
                    break
        ut.clear_cuda(None, None)
        subprocess.run("gpustat", shell=True)
    return result3, memory_used, memory_free
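ut.random_image is not shown; a plausible sketch, assuming the SR models here take a 1x3xHxW float tensor:

import torch


def random_image(dimension):
    # Random RGB image batch of shape (1, 3, dimension, dimension)
    return torch.rand(1, 3, dimension, dimension)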
Example #10
def check_differnet_patches_in_forward_chop(min_dim,
                                            max_dim,
                                            shave,
                                            image_path,
                                            gap=1,
                                            run=1,
                                            device="cuda"):
    """
    Experiments iterative forward chop for different patch dimensions

    Parameters
    ----------
    min_dim : int
        starting patch dimension.
    max_dim : int
        ending patch dimension.
    shave : int
        overlapping pixel amount.
    image_path : str
        image path.
    gap : int, optional
        gap between two patch dimension. The default is 1.
    run : int, optional
        total number of run for averaging. The default is 1.
    device : str, optional
        GPU or CPU. The default is "cuda".

    Raises
    ------
    Exception
        raised when the forward_chop.py subprocess fails.

    Returns
    -------
    None.

    """
    logger = ut.get_logger()
    logger.info(
        "Checking different patches with dimension from {} to {} in iterative forward chop...\n"
        .format(min_dim, max_dim))
    model_name = "EDSR"
    device_name = "CPU"
    total_memory = "~"
    if device == "cuda":
        _, device_name = ut.get_device_details()
        total_memory, _, _ = ut.get_gpu_details(device,
                                                "\nDevice info:",
                                                logger=None,
                                                print_details=False)
    print_result = "0"
    full_result = []
    for d in tqdm(range(min_dim, max_dim + 1, gap)):
        s = [0, 0, 0, 0, 0]
        ut.get_gpu_details("cuda",
                           "GPU status before patch: ({}x{}):".format(d, d),
                           logger)
        for r in range(run):
            temp = [d]
            command = ("python3 " + "forward_chop.py " + image_path + " " +
                       str(d) + " " + str(shave) + " " + print_result + " " +
                       device)
            p = subprocess.run(command, shell=True, capture_output=True)
            if p.returncode == 0:
                temp += list(map(float, p.stdout.decode().split()[1:]))
                s = [s[i] + temp[i] for i in range(len(temp))]
            else:
                raise Exception(p.stderr)

        s = np.array(s) / run
        full_result.append(s)

    full_result = pd.DataFrame(full_result)
    full_result.columns = [
        "Dimension",
        "EDSR Processing Time",
        "Cropping time",
        "Shifting Time",
        "CUDA Cleanign Time",
    ]

    if device == "cuda":
        plt_title = "Model: {} | GPU: {} | Memory: {} MB".format(
            model_name, device_name, total_memory)
    else:
        plt_title = "Model: {} | Device: {}".format(model_name, "CPU")

    date = "_".join(str(time.ctime()).split())
    date = "_".join(date.split(":"))

    x_label = "Dimension n of patch(nxn)"

    # (column index, y-axis label, output file prefix) for each stat to plot
    plot_specs = [
        (1, "Processing time (sec): LR -> SR", "processing_time_"),
        (2, "Cropping time (sec): LR -> SR", "cropping_time_"),
        (3, "Shifting time (sec): LR -> SR", "shifting_time_"),
        (4, "Cleaning time (sec): LR -> SR", "cuda_cleaning_time_"),
    ]
    x_data = np.array(full_result.iloc[:, 0].values).flatten()
    for column, y_label, file_prefix in plot_specs:
        y_data = np.array(full_result.iloc[:, column].values).flatten()
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.title(plt_title)
        plt.plot(x_data, y_data)
        plt.savefig(
            "results/forward_chop_experiment/{0}.png".format(file_prefix + date))
        plt.show()

    filename = "stat_" + "EDSR_forward_processing_iterative_" + date
    file = open("results/" + "forward_chop_experiment" + "/" + filename, "a")
    file.write(device + "\n")
    file.write(device_name + "\n")
    file.write("Memory: " + str(total_memory) + "MB\n")
    full_result.to_csv(file)
    file.close()
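The stdout contract assumed above, in isolation: forward_chop.py prints whitespace-separated numbers, the first token is skipped, and the rest are parsed as floats (a sketch of the parsing side only):

def parse_chop_output(raw_stdout):
    # Skip the first token, parse the remaining ones as floats
    tokens = raw_stdout.decode().split()
    return list(map(float, tokens[1:]))


assert parse_chop_output(b"42 0.12 0.03 0.05 0.01") == [0.12, 0.03, 0.05, 0.01]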