Example #1
    def __init__(self, bot):
        self.bot = bot
        self.guild = None
        self.role_name_to_obj = None
        self.role_name_to_info = None
        self.supporter_role = None

        self.command_channels = utilities.config[
            ("test_" if os.getenv("mode") == "test" else "") +
            "command_channels"]
        self.announcement_channel = utilities.config[
            ("test_" if os.getenv("mode") == "test" else "") +
            "announcement_channel"]
        # TODO: fix handling when the log files do not exist yet
        self.data_change_logger = utilities.get_logger(
            "study_executor_data_change", "data_change.log")
        self.time_counter_logger = utilities.get_logger(
            "study_executor_time_counter", "discord.log")
        self.heartbeat_logger = utilities.get_logger(
            "study_executor_heartbeat", "heartbeat.log")
        self.redis_client = utilities.get_redis_client()
        engine = utilities.get_engine()
        Session = sessionmaker(bind=engine)
        self.sqlalchemy_session = Session()
        self.timezone_session = utilities.get_timezone_session()
        self.make_heartbeat.start()
        self.birthtime = utilities.get_time()
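This cog (and the similar ones in the next examples) relies on a project-specific utilities.get_logger(name, filename) helper that is not shown on this page. As a rough, non-authoritative sketch, assuming the helper simply wraps the standard logging module with a file handler (the real project may do more), it could look like this:

import logging

def get_logger(name, filename):
    """Return a named logger that appends to filename (handler added only once)."""
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid duplicate handlers on repeated calls
        handler = logging.FileHandler(filename)
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger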
Example #2
    def __init__(self, bot):
        self.bot = self.client = bot
        self.guild = None
        self.role_objs = None
        self.role_names = None
        self.supporter_role = None

        self.data_change_logger = utilities.get_logger(
            "study_executor_data_change", "data_change.log")
        self.time_counter_logger = utilities.get_logger(
            "study_executor_time_counter", "discord.log")
        self.redis_client = utilities.get_redis_client()
        engine = utilities.get_engine()
        Session = sessionmaker(bind=engine)
        self.sqlalchemy_session = Session()
        self.timezone_session = utilities.get_timezone_session()
        self.birthtime = utilities.get_time()
Example #3
 def __init__(self, template, rect):
     self.graphics = template.graphics
     self.event_listener = template.event_listener
     self.gel = template.gel
     self.log = get_logger("gui")
     super(BaseTemplate, self).__init__(template, rect)
     self.background = COLOR_BLACK
     self.__templates = {}
     self.__active = None
Example #4
    def __init__(self, bot):
        self.bot = bot
        self.guild = None
        self.role_objs = None
        self.role_name_to_obj = None
        self.supporter_role = None

        # TODO: fix handling when the log files do not exist yet
        self.time_counter_logger = utilities.get_logger(
            "study_executor_time_counter", "discord.log")
        self.heartbeat_logger = utilities.get_logger(
            "study_executor_heartbeat", "heartbeat.log")
        self.redis_client = utilities.get_redis_client()
        engine = utilities.get_engine()
        Session = sessionmaker(bind=engine)
        self.sqlalchemy_session = Session()
        self.timezone_session = utilities.get_timezone_session()
        self.make_heartbeat.start()
        self.birthtime = utilities.get_time()
Example #5
 def __init__(self, output_manager):
     """ Initialize the DataExtractorActor class.
     :param output_manager: The output manager actor.
     :raises: DataExtractorError - If output manager is None.
     """
     super(DataExtractorActor, self).__init__()
     if output_manager is None:
         raise DataExtractorError("Output data manager cannot be None.")
     self._log = get_logger()
     self._output_manager = output_manager
Example #6
 def __init__(self, db_path):
     """Initialize the DbCreatorActor class.
     :param db_path: Path to database.
     :raises: DbCreatorError if db_path is None or empty.
     """
     super(DbCreatorActor, self).__init__()
     if not db_path:
         raise DbCreatorError('Application must be initialized with valid database path.')
     self._log = get_logger()
     self._db_path = Path(db_path)
     self.validate_db_connection()
Example #7
    def __init__(self, graphics):
        self.__log = get_logger("world")
        self.graphics = graphics
        self.entity_manager = EntityManager(self)
        self.team_manager = TeamManager()
        self.action_manager = ActionManager()

        self.__cfg_header = "header"
        self.__cfg_entities = "entities"
        self.__cfg_map = "map"
        self.__started = False
Example #8
def get_graphics(headless):
    """Selects the correct graphics"""
    log = get_logger("graphics")
    if headless:
        log.info("HeadLess mode, using NoneGraphics")
        return NoneGraphics(log)
    elif pygame:
        log.info("Pygame detected, using as graphics interface")
        return PygameGraphics(log)
    else:
        log.error("No any graphics interface is usable, returning BaseGraphics")
        return BaseGraphics(log)
Example #9
def get_event_listener(headless):
    """Returns a usable event listener"""
    log = get_logger("event")
    if headless:
        log.info("HeadLess mode, using GenericEventListener")
        return GenericEventListener(log)
    elif pygame:
        log.info("Pygame detected, using as event interface")
        return PygameEventListener(log)
    else:
        log.error("No any event interface is usable, returning GenericEventListener")
        return GenericEventListener(log)
Example #10
    def __init__(self, bot):
        self.bot = bot

        # images
        self.mock_img = 'images/mock.jpg'
        self.yike_img = 'images/yike.png'

        self.logger = util.get_logger()
        self.quote_channel_id = 178576825511837696
        self.bot_channel_id = 556668127786827776
        self.voice = {}

        self.reminder_loop.start()
Example #11
 def __init__(self):
     super(SocketHandler, self).__init__()
     self.__run_lock = Lock()  # Lock that prevents closing handler while Thread is inside run() loop
     self.__recv_queue = Queue()  # Queue of received data from socket
     self.__send_queue = Queue()  # Queue of data to send through socket
     self.log = get_logger("socket_%s" % self.name)
     self.log.info("Created new socket")
     self.__socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
     self.__socket.setblocking(False)
     self.__address = None
     self.__port = None
     self.__error_flag = Event()
     self.start()
Example #12
def init_main():
    """Main initializator for catching exceptions"""
    try:
        #Call the main()
        main_log = get_logger("main")
        error_flag = Event()
        result = controlled_exception(main, main_log, error_flag, True)
        main_log.debug("main() return: '%s'" % result)

        main_log.debug("Active Threads: %s" % threading_enumerate())
        unreachable = gc.collect()
        main_log.debug("Garbage Collector stats: unreachable %s, total %s, list %s" % (unreachable, len(gc.garbage), gc.garbage))
        if len(gc.garbage):
            for target in gc.garbage:
                main_log.warning("target %s, referrers %s, referents %s" % (target, gc.get_referrers(target), gc.get_referents(target)))

        logging.shutdown()
    except: # pylint: disable=W0702
        print_exc()
        print("Error ocurred at init_main")
Example #13
def client_main():
    # Prepare
    log = get_logger("Client", [stdout])
    error_count = 0
    error_flag = Event()
    exit_flag = Event()

    # Main loop
    while not exit_flag.is_set():
        safe_call(client, log, error_flag, exit_flag, True)
        if error_flag.is_set():
            if error_count < ERROR_COUNT_MAX:
                error_flag.clear()
                error_count += 1
            else:
                break

    if error_flag.is_set():
        show_message(log, logging.ERROR, "Error occurred %s times in client loop" % error_count)
    else:
        log.info("Finished correctly")
Example #14
def main(**args):
    input_dir_1 = args["input_dir_1"]
    input_dir_2 = args["input_dir_2"]
    input_type = args["input_type"]
    output_file = args["output_file"]
    log_file = args["log_file"]
    verbose = args["verbose"]

    # Remove old output file
    if os.path.exists(output_file):
        os.remove(output_file)

    # Remove old log file
    if os.path.exists(log_file):
        os.remove(log_file)

    # Start merging conll
    if input_type == 'dir':
        print("***************Merging Files***************")
        a1_tokens, a2_tokens = merger(input_dir_1, input_dir_2, output_file,
                                      verbose)
    elif input_type == 'csv':
        a1_tokens, a2_tokens = getTokensFromCSV(input_dir_1, output_file)
    else:
        a1_tokens, a2_tokens = getTokens(input_dir_1, input_dir_2, output_file)

    # Start evaluating merged conll file
    logger = utilities.get_logger(log_file)
    print("***************F1 Evaluation Metric***************")
    e.evaluate_conll_file(logger=logger,
                          fileName=output_file,
                          raw=True,
                          delimiter=None,
                          oTag='O',
                          latex=False)

    print("***************Kappa Evaluation Metric***************")
    getKappa(a1_tokens, a2_tokens, logger, input_type)
Example #15
def main(**args):
    input_file = args["input_file"]
    save_path = args["output_dir"]
    verbose = args["verbose"]
    kfold = args["kfold"]
    pos = args["pos"]
    log_file = args["log_file"]
    
    logger = utilities.get_logger(log_file)
    
    # Clean up output directory
    if os.path.exists(save_path):
        shutil.rmtree(save_path)

    os.mkdir(save_path)
    
    # Start splitting dataset
    # into respective directory
    for i in range(0, kfold):
        final_path = os.path.join(save_path, str(i+1))
        if not os.path.exists(final_path):
            os.mkdir(final_path)
        split(input_file, final_path, verbose, logger, pos)
Example #16
def main(**args):
    input_file = args["input_file"]
    save_path = args["output_dir"]
    verbose = args["verbose"]
    kfold = args["kfold"]
    csv = args["csv"]
    log_dir = args["log_dir"]
    split_type = args["split_type"]
    seq_len = (args["min_seq_len"], args["max_seq_len"])

    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    log_file = os.path.join(log_dir, 'data.log')

    logger = utilities.get_logger(log_file)

    # Clean up output directory
    if not csv:
        save_path = os.path.join(save_path, split_type)

    if os.path.exists(save_path):
        shutil.rmtree(save_path)

    os.mkdir(save_path)

    # Start splitting dataset
    # into respective directory
    for i in range(0, kfold):
        final_path = os.path.join(save_path, str(i + 1))
        if not os.path.exists(final_path):
            os.mkdir(final_path)
        if not csv:
            split(input_file, final_path, verbose, logger, split_type, seq_len)
        else:
            split_csv(input_file, final_path, logger)
Example #17
def server_main():
    # Prepare
    log = get_logger("Main", [stdout])
    os_type = running_os()
    if os_type is None:
        show_message(log, logging.ERROR, "OS type is unknown")
        return

    error_count = 0
    error_flag = Event()
    exit_flag = Event()
    index = CAM_INDEX[os_type]

    # Main loop
    detection = Detection(index, external_function)
    while not detection.stop_flag:
        safe_call(detection.process, log, error_flag, exit_flag, True)
        if exit_flag.is_set():
            break
        if error_flag.is_set():
            if error_count < ERROR_COUNT_MAX:
                error_flag.clear()
                error_count += 1
            else:
                break

    safe_call(detection.close, log, error_flag, exit_flag, False)

    if error_flag.is_set():
        show_message(log, logging.ERROR, "Error occurred %s times in loop" % error_count)
    else:
        log.info("Finished correctly")

    if os_type is SYSTEMS.WINDOWS:
        # exit() sometimes hangs on shutdown here, so instead we ask the OS to kill our own process
        sys_call("taskkill /F /PID %i" % getpid(), shell=True)
Example #18
'''
Created on Apr 12, 2013

@author: Fang Jiaguo
'''
from Queue import Queue
from bson.objectid import ObjectId
from lxml import html
from pyes.query import MultiMatchQuery, Search
from pymongo.errors import PyMongoError
from settings import settings, elasticsearch, mongodb
from urllib2 import HTTPError, URLError
from utilities import LimitedCaller, send_request, get_logger, calc_similarity
import datetime
import threading

logger = get_logger('OnlineMovieCrawler', 'online_movie_crawler.log')
movie_pool = Queue()

class IQIYIMovieCrawler(threading.Thread):
    def __init__(self):
        self.provider = 'iqiyi'
        self.logger = logger.getChild('IQIYIMovieCrawler')
        self.request_iqiyi_page = LimitedCaller(send_request, settings['online_movie_crawler']['iqiyi_movie_crawler']['reqs_per_min'])
        threading.Thread.__init__(self)

    def run(self):
        page_index = 0
        while True:
            if page_index == 0:
                self.logger.info('==========IQIYIMovieCrawler Started=========')
Example #19
"""
Pytest for Collection

"""

__author__ = "jerome.colin'at'cesbio.cnes.fr"
__license__ = "MIT"
__version__ = "1.0.3"

import Collection
import utilities
import os

TEST_DATA_PATH = os.environ['TEST_DATA_PATH']

logger = utilities.get_logger('test_Collection', verbose=True)

venus_collection = Collection.Collection(TEST_DATA_PATH + "venus_collection/",
                                         logger)
acix_maja_collection = Collection.Collection(
    TEST_DATA_PATH + "acix_carpentras/", logger)
acix_vermote_collection = Collection.Collection(
    TEST_DATA_PATH + "vermote_carpentras/", logger)


def test_discover():
    assert venus_collection.type_count == [2, 0, 0, 0]
    assert acix_vermote_collection.type_count == [0, 6, 0, 0]
    assert acix_maja_collection.type_count == [0, 0, 6, 0]

Example #20
import os
import subprocess
from datetime import timedelta
from time import sleep

from dotenv import load_dotenv

import utilities

load_dotenv("dev.env")
logger = utilities.get_logger("main", "heartbeat.log")

proc = None
line = utilities.get_last_line()
# TODO: fix - Dangerous - need to make sure it's our process
utilities.kill_last_process(line)

while True:
    try:
        line = utilities.get_last_line()
        last_time = utilities.get_last_time(line)
        max_diff_var_name = ("test_" if os.getenv("mode") == "test" else
                             "") + "heart_attack_interval_sec"
        max_diff_sec = int(os.getenv(max_diff_var_name))
        max_diff = timedelta(seconds=max_diff_sec)

        if (not last_time) or utilities.get_time() - last_time > max_diff:
            # Process has died. Restart it
            proc = subprocess.Popen(['python3', './time_counter.py'])
            logger.info(f"{utilities.get_time()} birth with pid {proc.pid}")
Example #21
"""
Pytest for Collection

"""

__author__ = "jerome.colin'at'cesbio.cnes.fr"
__license__ = "MIT"
__version__ = "1.0.3"

import Collection
import Comparison
import utilities
import os

TEST_DATA_PATH = os.environ['TEST_DATA_PATH']

logger = utilities.get_logger('test_Comparison', verbose=True)


def test_Comparison_basic():
    logger.info("test_Comparison")
    acix_maja_collection = Collection.Collection(
        TEST_DATA_PATH + "acix_carpentras/", logger)
    acix_vermote_collection = Collection.Collection(
        TEST_DATA_PATH + "vermote_carpentras/", logger)

    compare = Comparison.Comparison(acix_vermote_collection,
                                    acix_maja_collection, logger)
    assert sorted(compare.matching_products,
                  key=lambda x: x[1])[0][0] == "20171005"
    assert sorted(compare.matching_products, key=lambda x: x[1])[0][
        1] == TEST_DATA_PATH + "vermote_carpentras/refsrs2-L1C_T31TFJ_A003037_20171005T104550-Carpentras.hdf"
Example #22
from Queue import Queue
from pymongo.errors import PyMongoError
from sets import Set
from settings import settings, mongodb
from urllib2 import HTTPError, URLError
from urlparse import urldefrag
from utilities import LimitedCaller, get_logger, send_request
import datetime
import gzip
import json
import md5
import os
import re
import threading
import time

logger = get_logger('DoubanCrawler', 'douban_crawler.log')
tag_regex = re.compile(settings['douban_crawler']['tag_regex'])
movie_regex = re.compile(settings['douban_crawler']['movie_regex'])
request_douban_page = LimitedCaller(send_request, settings['douban_crawler']['page_reqs_per_min'])
request_douban_api = LimitedCaller(send_request, settings['douban_crawler']['api_reqs_per_min'])
tag_url_pool = Queue()
movie_url_pool = Queue()
crawled_url_pool = Set()
low_priority_movie_pool = Queue()  # low priority
high_priority_movie_pool = Queue()  # high priority
# in_theaters_movie_ids = Queue()
# coming_soon_movie_ids = Queue()
# top250_movie_ids = Queue()

class InitialCrawler(threading.Thread):
    def __init__(self):
Example #23
'''
Created on Apr 12, 2013

@author: Fang Jiaguo
'''
from Queue import Queue
from bson.objectid import ObjectId
from lxml import html
from pyes.query import MultiMatchQuery, Search
from pymongo.errors import PyMongoError
from settings import settings, elasticsearch, mongodb
from urllib2 import HTTPError, URLError
from utilities import LimitedCaller, send_request, get_logger, calc_similarity
import datetime
import threading

logger = get_logger('OnlineMovieCrawler', 'online_movie_crawler.log')
movie_pool = Queue()


class IQIYIMovieCrawler(threading.Thread):
    def __init__(self):
        self.provider = 'iqiyi'
        self.logger = logger.getChild('IQIYIMovieCrawler')
        self.request_iqiyi_page = LimitedCaller(
            send_request, settings['online_movie_crawler']
            ['iqiyi_movie_crawler']['reqs_per_min'])
        threading.Thread.__init__(self)

    def run(self):
        page_index = 0
        while True:
Example #24
        model.eval()
        input_image = ut.random_image(dimension)
        if model_name == "RRDB":
            input_image = input_image[:, 2:, :, :]
        input_image = input_image.to(device)
        with torch.no_grad():
            start = time.time()
            print('Before processing: ')
            subprocess.run("gpustat", shell=True)
            output_image = model(input_image)
            print('After processing: ')
            subprocess.run("gpustat", shell=True)
            end = time.time()
            total_time = end - start
            ut.clear_cuda(input_image, output_image)
        model.cpu()
        del model
        print('After model shifting and deleting: ')
        subprocess.run("gpustat", shell=True)
    except RuntimeError as err:
        logger.error("Runtime error for dimension: {}x{}: " + err)
        sys.exit(1)
    return total_time


if __name__ == "__main__":
    sys.excepthook = ut.exception_handler
    logger = ut.get_logger()
    print(
        binary_search_helper(int(sys.argv[1]), logger, model_name=sys.argv[2]))
Example #25
 def test_configure_logger(self, init_config, clean_up):
     configure_logger()
     assert get_logger() is not None
     clean_up()
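The test above only checks that configure_logger() followed by get_logger() yields a usable logger. A minimal pair that would satisfy it, purely illustrative since the real module is not shown, might be:

import logging

_APP_LOGGER_NAME = "app"  # hypothetical shared logger name

def configure_logger():
    """Configure the shared application logger once."""
    logger = logging.getLogger(_APP_LOGGER_NAME)
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.INFO)

def get_logger():
    """Return the shared application logger."""
    return logging.getLogger(_APP_LOGGER_NAME)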
Example #26
def main():

    data_dir = "../data/"
    os.makedirs(data_dir, exist_ok=True)

    np.set_printoptions(suppress=True)

    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    x_train, y_train = sklearn.utils.shuffle(x_train, y_train)
    x_test, y_test = sklearn.utils.shuffle(x_test, y_test)

    logger = utilities.get_logger(
        os.path.join(data_dir, "mnist_tensorflow.html"))

    log_size = 10

    # Log a few samples
    utilities.log_samples(logger, x_test[:log_size], y_test[:log_size])

    # Reshape 28x28 matrices to 784-element vectors
    x_train_flat = x_train.reshape(60000, 784)
    x_test_flat = x_test.reshape(10000, 784)

    # ys are scalars, convert them to one-hot encoded vectors
    y_train_categorical = keras.utils.to_categorical(y_train, num_classes=10)
    y_test_categorical = keras.utils.to_categorical(y_test, num_classes=10)

    model = Model()

    batch_size = 32
    epochs = 10

    with tf.Session() as session:

        session.run(tf.global_variables_initializer())

        # Log untrained model predictions
        log_predictions(logger,
                        model,
                        session,
                        x_test[:log_size],
                        y_test[:log_size],
                        header="Untrained model")

        training_loss, training_accuracy = get_statistics(
            session, model, x_train_flat, y_train_categorical, batch_size)

        print(
            "Initial training loss: {:.3f}, training accuracy: {:.3f}".format(
                training_loss, training_accuracy))

        test_loss, test_accuracy = get_statistics(session, model, x_test_flat,
                                                  y_test_categorical,
                                                  batch_size)

        print("Initial test loss: {:.3f}, test accuracy: {:.3f}".format(
            test_loss, test_accuracy))

        model.fit(session, x_train_flat, y_train_categorical, epochs,
                  x_test_flat, y_test_categorical, batch_size)

        # Log trained model predictions
        log_predictions(logger,
                        model,
                        session,
                        x_test[:log_size],
                        y_test[:log_size],
                        header="Trained model")
Example #27
def main():

    data_dir = "../data/"
    os.makedirs(data_dir, exist_ok=True)

    np.set_printoptions(suppress=True)

    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    x_train, y_train = sklearn.utils.shuffle(x_train, y_train)
    x_test, y_test = sklearn.utils.shuffle(x_test, y_test)

    logger = utilities.get_logger(os.path.join(data_dir, "mnist.html"))

    log_size = 10

    # Log a few samples
    utilities.log_samples(logger, x_test[:log_size], y_test[:log_size])

    # Reshape 28x28 matrices to 784-element vectors
    x_train_flat = x_train.reshape(60000, 784, 1)
    x_test_flat = x_test.reshape(10000, 784, 1)

    # ys are scalars, convert them to one-hot encoded vectors
    y_train_categorical = keras.utils.to_categorical(y_train,
                                                     num_classes=10).reshape(
                                                         60000, 10, 1)
    y_test_categorical = keras.utils.to_categorical(y_test,
                                                    num_classes=10).reshape(
                                                        10000, 10, 1)

    model = net.Network(layers=[784, 100, 50, 10])

    # Log untrained model predictions
    log_predictions(logger,
                    model,
                    x_test[:log_size],
                    y_test[:log_size],
                    header="Untrained model")

    train_cost, train_accuracy = net.get_statistics(model, x_train_flat,
                                                    y_train_categorical)
    print("Initial training cost: {:.3f}, training accuracy: {:.3f}".format(
        train_cost, train_accuracy))

    test_cost, test_accuracy = net.get_statistics(model, x_test_flat,
                                                  y_test_categorical)
    print("Initial test cost: {:.3f}, test accuracy: {:.3f}".format(
        test_cost, test_accuracy))

    model.fit(x_train_flat,
              y_train_categorical,
              epochs=10,
              learning_rate=0.1,
              x_test=x_test_flat,
              y_test=y_test_categorical)

    # Log trained model predictions
    log_predictions(logger,
                    model,
                    x_test[:log_size],
                    y_test[:log_size],
                    header="Trained model")
Example #28
def batch_forward_chop(
    patch_list,
    batch_size,
    channel,
    img_height,
    img_width,
    dim,
    shave,
    scale,
    model,
    device="cuda",
    print_timer=True,
):
    """
    Create SR image from batches of patches

    Parameters
    ----------
    patch_list : list
        list of patches.
    batch_size : int
        batch size.
    channel : int
        input image channel.
    img_height : int
        input image height.
    img_width : int
        input image width.
    dim : int
        patch dimension.
    shave : int
        shave value for patch.
    scale : int
        scale for LR to SR.
    model : nn.Module
        SR model.
    device : str, optional
        GPU or CPU. The default is 'cuda'.
    print_timer : bool, optional
        Print result or not. The default is True.

    Raises
    ------
    Exception
        If batch_size is greater than the total number of patches.

    Returns
    -------
    3D matrix, tuple
        output_image, tuple of timings.

    """
    logger = ut.get_logger()
    total_patches = len(patch_list)
    if batch_size > total_patches:
        raise Exception("Batch size greater than total number of patches")
    output_image = torch.tensor(
        np.zeros((channel, img_height * scale, img_width * scale)))

    cpu_to_gpu_time = 0
    gpu_to_cpu_time = 0
    batch_creating_time = 0
    total_EDSR_time = 0
    cuda_clear_time = 0
    merging_time = 0
    for start in range(1, total_patches + 1, batch_size):
        info = ""
        try:
            batch_creating_timer = ut.timer()
            batch = []
            end = start + batch_size
            if start + batch_size > total_patches:
                end = total_patches + 1
            for p in range(start, end):
                batch.append(patch_list[p][4])
            batch_creating_time += batch_creating_timer.toc()

            torch.cuda.synchronize()
            cpu_to_gpu_timer = ut.timer()
            batch = torch.stack(batch).to(device)
            torch.cuda.synchronize()
            cpu_to_gpu_time += cpu_to_gpu_timer.toc()
            info = (info + "C2G Starts: " + str(cpu_to_gpu_timer.t0) +
                    "C2G total: " + str(cpu_to_gpu_time))
            # =============================================================================
            #             print(batch.shape)
            #             subprocess.run("gpustat", shell=True)
            # =============================================================================
            with torch.no_grad():
                # =============================================================================
                #                 print(start, end)
                #                 print(sys.getsizeof(batch))
                # =============================================================================
                torch.cuda.synchronize()
                start_time = time.time()
                sr_batch = model(batch)
                torch.cuda.synchronize()
                end_time = time.time()
                processing_time = end_time - start_time
                total_EDSR_time += processing_time
                info = (info + "\tModel Starts: " + str(start_time) +
                        "Model total: " + str(total_EDSR_time))

            torch.cuda.synchronize()
            gpu_to_cpu_timer = ut.timer()
            sr_batch = sr_batch.to("cpu")
            torch.cuda.synchronize()
            gpu_to_cpu_time += gpu_to_cpu_timer.toc()
            info = (info + "\tGPU 2 CPU Starts: " + str(gpu_to_cpu_timer.t0) +
                    "G2C total: " + str(gpu_to_cpu_time))
            _, _, patch_height, patch_width = sr_batch.size()
            logger.info(info)
            batch_id = 0
            merging_timer = ut.timer()
            for p in range(start, end):
                output_image[:, patch_list[p][3][0]:patch_list[p][3][
                    1], patch_list[p][3][2]:patch_list[p][3][3], ] = sr_batch[
                        batch_id][:, patch_list[p][2][0]:patch_list[p][2][1],
                                  patch_list[p][2][2]:patch_list[p][2][3], ]
                batch_id += 1

            merging_time += merging_timer.toc()
            cuda_clear_timer = ut.timer()
            ut.clear_cuda(batch, None)
            cuda_clear_time += cuda_clear_timer.toc()
        except RuntimeError as err:
            ut.clear_cuda(batch, None)
            raise Exception(err)
    model = model.to("cpu")

    if print_timer:
        print("Total upsampling time: {}\n".format(total_EDSR_time))
        print("Total CPU to GPU shifting time: {}\n".format(cpu_to_gpu_time))
        print("Total GPU to CPU shifting time: {}\n".format(gpu_to_cpu_time))
        print("Total batch creation time: {}\n".format(batch_creating_time))
        print("Total merging time: {}\n".format(merging_time))
        print("Total CUDA clear time: {}\n".format(cuda_clear_time))
        print("Total time: {}\n".format(total_EDSR_time + cpu_to_gpu_time +
                                        gpu_to_cpu_time + batch_creating_time +
                                        cuda_clear_time + merging_time))
    return output_image, (
        total_EDSR_time,
        cpu_to_gpu_time,
        gpu_to_cpu_time,
        batch_creating_time,
        cuda_clear_time,
        merging_time,
    )
Example #29
def do_binary_search(model_name, start_dim):
    """
    Binary search function...

    Returns
    -------
    None.

    """
    # Prints the header banner
    banner = pyfiglet.figlet_format("Binary Search: " + model_name)
    print(banner)

    # Getting logger
    logger = ut.get_logger()

    # Check valid model or not
    if model_name not in ["EDSR", "RRDB"]:
        logger.exception("{} model is unkknown".format(model_name))
        raise Exception("Unknown model...")

    # Device type cpu or cuda
    device = ut.get_device_type()

    if device == "cpu" and model_name not in ["EDSR"]:
        logger.exception("{} model cannot be run in CPU".format(model_name))
        raise Exception("{} model cannot be run in CPU".format(model_name))

    # Device information
    _, device_name = ut.get_device_details()

    if device == "cuda":
        logger.info("Device: {}, Device Name: {}".format(device, device_name))
        ut.get_gpu_details(
            device,
            "Before binary search: {}".format(model_name),
            logger,
            print_details=True,
        )
    else:
        logger.info("Device: {}, Device Name: {}".format(device, device_name))

    # Clearing cuda cache
    ut.clear_cuda(None, None)

    # Getting the highest unacceptable dimension which is a power of 2
    max_unacceptable_dimension = maximum_unacceptable_dimension_2n(
        device, logger, start_dim=start_dim, model_name=model_name)
    print("\nMaximum unacceptable dimension: {}\n".format(
        max_unacceptable_dimension))

    # Clearing cuda cache
    ut.clear_cuda(None, None)

    # Getting the maximum acceptable dimension
    max_dim = maximum_acceptable_dimension(device,
                                           logger,
                                           None,
                                           max_unacceptable_dimension,
                                           model_name=model_name)
    print("\nMaximum acceptable dimension: {}\n".format(max_dim))

    # Clearing cuda cache
    ut.clear_cuda(None, None)

    # For batch processing
    config = toml.load("../batch_processing.toml")
    config["end_patch_dimension"] = max_dim
    with open("../batch_processing.toml", "w") as f:
        toml.dump(config, f)

    # For linear search
    config = toml.load("../config.toml")
    config["max_dim"] = max_dim
    with open("../config.toml", "w") as f:
        toml.dump(config, f)
Example #30
__author__ = "jerome.colin'at'cesbio.cnes.fr"
__license__ = "MIT"
__version__ = "1.0.3"

import os
import utilities
import Roi
import Product
import numpy
import pytest
from sklearn.metrics import mean_squared_error

TEST_DATA_PATH = os.environ['TEST_DATA_PATH']

logger = utilities.get_logger('test_utilities', verbose=True)


def test_rmse_single_vector():
    logger.debug("test_rmse_single_vector")
    v1 = numpy.random.rand(100)
    rmse_0 = utilities.rmse(v1, v1)
    assert rmse_0 == 0


def test_mse_along_random_vector():
    logger.debug("test_mse_along_random_vector")
    v1 = numpy.random.rand(100)
    v2 = numpy.random.rand(100)
    mse_utilities = utilities.mse(v1, v2)
    mse_sklearn = mean_squared_error(v1, v2)
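These tests exercise utilities.mse and utilities.rmse without showing them. A plausible numpy-based sketch consistent with the assertions (rmse of a vector with itself is 0, and mse should agree with sklearn's mean_squared_error) could be:

import numpy

def mse(v1, v2):
    """Mean squared error between two equally shaped arrays."""
    return numpy.mean((numpy.asarray(v1) - numpy.asarray(v2)) ** 2)

def rmse(v1, v2):
    """Root mean squared error; rmse(v, v) == 0."""
    return numpy.sqrt(mse(v1, v2))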
Example #31
__author__ = "jerome.colin'at'cesbio.cnes.fr"
__license__ = "MIT"
__version__ = "1.0.3"

import os
import pytest
import Product
import Roi
import utilities
import numpy
import osgeo.gdal
from matplotlib import pylab as pl

TEST_DATA_PATH = os.environ['TEST_DATA_PATH']

logger = utilities.get_logger('test_Product', verbose=True)

# TEST REFACTORED PRODUCT FROM HERE

## TESTING PRODUCT_DIR (DEFAULT)
def test_product_dir():
    logger.info("TESTING PRODUCT_DIR (DEFAULT)")
    p_dir = Product.Product(TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0",
                                  logger)
    assert type(p_dir.content_list) is list

    p_dir_filename = p_dir.find_band("SRE_B4.")
    assert p_dir_filename == TEST_DATA_PATH + "acix_carpentras/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0/SENTINEL2A_20171007-103241-161_L2A_T31TFJ_C_V1-0_SRE_B4.tif"

    p_dir_band = p_dir.get_band(p_dir.find_band("SRE_B4."))
    assert type(p_dir_band) is numpy.ndarray
Example #32
 def __init__(self, error_flag):
     self.log = get_logger("game")
     self.error_flag = error_flag
Example #33
class FM_load_testing(TaskSet):
	log = utilities.get_logger(logfile)

	def on_start(self):
		""" on_start is called when a Locust start before 
			any task is scheduled
		"""
		self.set_global_variables()
		# if len(USER_CREDENTIALS) > 0:
		# 	self.username, self.password = USER_CREDENTIALS.pop() #removes username/password from USER_CREDENTIALS list after being hatched		
		# 	self.log.info("Created: {} | {}".format(self.username, self.password))


	def set_global_variables(self):
		global client_secret
		with open(client_secret) as file:
			data = json.load(file)
		self.user_id = data['user_id'] #get from Manage Users > click on User > get the alphanumeric code in URL bar; also available in Local Storage of Developer Tools
		self.client_id = data['client_id'] #get from API Keys > client_id
		self.username = data['username']
		self.password = data['password']

	# FETCH
	@task(1) #@task(n) where n is the ratio of how each function will run in the given swarm	
	def fetch_leads(self):
		try :			
			data = utilities.fetch(LEADS, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
			
	@task(1)	
	def fetch_customers(self):
		try :			
			data = utilities.fetch(CUSTOMERS, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
	
	@task(1)	
	def fetch_sites(self):
		try :			
			data = utilities.fetch(SITES, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_contacts(self):
		try :			
			data = utilities.fetch(CONTACTS, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_opportunities(self):
		try :			
			data = utilities.fetch(OPPORTUNITIES, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_jobs(self):
		try :			
			data = utilities.fetch(JOBS, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_assets(self):
		try :			
			data = utilities.fetch(ASSETS, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_customer_invoices(self):
		try :			
			data = utilities.fetch(CUSTOMER_INVOICES, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_purchase_orders(self):
		try :			
			data = utilities.fetch(PURCHASE_ORDERS, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def fetch_supplier_invoices(self):
		try :			
			data = utilities.fetch(SUPPLIER_INVOICES, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)
	def fetch_tax_codes(self):
		try :			
			data = utilities.fetch(TAX_CODES, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)
	def fetch_account_codes(self):
		try:
			data = utilities.fetch(ACCOUNT_CODES, FETCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	# CREATE	
	@task(1) #@task(n) where n is the ratio of how each function will run in the given swarm	
	def create_leads(self):
		try :			
			data = utilities.create(LEADS, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
			
	@task(1)	
	def create_customers(self):
		try :			
			data = utilities.create(CUSTOMERS, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
	
	@task(1)	
	def create_sites(self):
		try :			
			data = utilities.create(SITES, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_contacts(self):
		try :			
			data = utilities.create(CONTACTS, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_opportunities(self):
		try :			
			data = utilities.create(OPPORTUNITIES, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_jobs(self):
		try :			
			data = utilities.create(JOBS, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_assets(self):
		try :			
			data = utilities.create(ASSETS, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_customer_invoices(self):
		try :			
			data = utilities.create(CUSTOMER_INVOICES, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_purchase_orders(self):
		try :			
			data = utilities.create(PURCHASE_ORDERS, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_supplier_invoices(self):
		try :			
			data = utilities.create(SUPPLIER_INVOICES, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_tax_codes(self):
		try :			
			data = utilities.create(TAX_CODES, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def create_account_codes(self):
		try :			
			data = utilities.create(ACCOUNT_CODES, CREATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	# UPDATE
	@task(1)	
	def update_leads(self):
		try :			
			data = utilities.update(LEADS, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
			
	@task(1)	
	def update_customers(self):
		try :			
			data = utilities.update(CUSTOMERS, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
	
	@task(1)	
	def update_sites(self):
		try :			
			data = utilities.update(SITES, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_contacts(self):
		try :			
			data = utilities.update(CONTACTS, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_opportunities(self):
		try :			
			data = utilities.update(OPPORTUNITIES, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_jobs(self):
		try :			
			data = utilities.update(JOBS, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_assets(self):
		try :			
			data = utilities.update(ASSETS, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_customer_invoices(self):
		try :			
			data = utilities.update(CUSTOMER_INVOICES, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_purchase_orders(self):
		try :			
			data = utilities.update(PURCHASE_ORDERS, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_supplier_invoices(self):
		try :			
			data = utilities.update(SUPPLIER_INVOICES, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_tax_codes(self):
		try :			
			data = utilities.update(TAX_CODES, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def update_account_codes(self):
		try :			
			data = utilities.update(ACCOUNT_CODES, UPDATE, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
	
	# SEARCH
	@task(1)	
	def search_leads(self):
		try :			
			data = utilities.search(LEADS, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
			
	@task(1)	
	def search_customers(self):
		try :			
			data = utilities.search(CUSTOMERS, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
	
	@task(1)	
	def search_sites(self):
		try :			
			data = utilities.search(SITES, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_contacts(self):
		try :			
			data = utilities.search(CONTACTS, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_opportunities(self):
		try :			
			data = utilities.search(OPPORTUNITIES, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_jobs(self):
		try :			
			data = utilities.search(JOBS, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_assets(self):
		try :			
			data = utilities.search(ASSETS, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_customer_invoices(self):
		try :			
			data = utilities.search(CUSTOMER_INVOICES, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_purchase_orders(self):
		try :			
			data = utilities.search(PURCHASE_ORDERS, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_supplier_invoices(self):
		try :			
			data = utilities.search(SUPPLIER_INVOICES, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_tax_codes(self):
		try :			
			data = utilities.search(TAX_CODES, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)

	@task(1)	
	def search_account_codes(self):
		try :			
			data = utilities.search(ACCOUNT_CODES, SEARCH, self)
			utilities.log_req_res(data, self)
		except:
			utilities.get_error_message(self)
Example #34
def main():
    try:
        # Configuration parameters:
        token_types_keyword = "Token_Types"
        token_generators_keyword = "Token_Generators"
        counters_keyword = "Counters"
        input_keys_keyword = "Input_Keys"
        process_keyword = "Process"

        # Get logger object
        logger = utilities.get_logger()
        logger.info("Starting objects instatiation")
        # Config file complete path
        # config_file_path = (
        #     "/home/sidharth/Dropbox/QM-Redesign/Configuration/config-0.yaml")
        config_file_path = (
            "/home/pi/Documents/QM-Redesign/Configuration/config-0.yaml")
        cm = configuration.Config_Manager(config_file_path)
        config_dict = cm.read_config()
        logger.debug("Configuration Dictionary:\n" + str(config_dict))

        # Instantiate Super_Queue
        sq = instantiate_super_queue(config_dict, token_types_keyword)
        logger.debug("Super_Queue instance:\n" + str(sq))

        # Instantiate counters and store them in a list
        counters_list = instantiate_counters(config_dict, counters_keyword,
                                             token_types_keyword,
                                             input_keys_keyword, logger)
        logger.debug("Counters list:\n" + str(counters_list))

        # Instantiate token generators
        token_generators_list, token_generator_dict = (
            instantiate_token_generators(
                config_dict, token_generators_keyword, logger))
        logger.debug("Token generators list:\n" + str(counters_list))

        logger.debug("Initializing bus..")
        bus = smbus.SMBus(1)


        logger.info("Starting main loop")

        thread_id = -1
        accept_threads_list = []
        # Thread lock
        lock = threading.Lock()
        while True:
            ch = getchar()
            logger.debug("Pressed key: " + ch)
            if ch == 'q':
                exit()

            # Token generators: issue tokens upon press of assigned keys
            for generator_x in token_generators_list:
                name_x = generator_x.get_name()
                types_dict_x = get_types_dict(generator_x, token_generator_dict,
                                              token_types_keyword)
                for key, value in types_dict_x.iteritems():
                    if value == ch:
                        type_requested = key
                        logger.debug("Requested type: " + type_requested)
                        thread_id += 1
                        logger.debug("Creating thread for token type: " +
                                     type_requested)
                        logger.debug("Supported types: " +
                                     str(generator_x.get_token_types_list()))
                        accept_threads_list.append(
                            thread_classes.Accept_Customer_Thread(
                                threadID=thread_id, sq=sq,
                                token_generator=generator_x,
                                requested_token_type=type_requested,
                                logger=logger, lock=lock))
                        logger.debug("Thread: " + str(thread_id))
                        accept_threads_list[-1].start()
                        logger.debug("Threads (issue token) list: " +
                                     str(accept_threads_list))

            # Scan counters to check if key pressed is that for processing
            # tokens at any of the counters
            for counter_x in counters_list:
                counter_instance = counter_x[0]
                counter_process_key = counter_x[1][process_keyword]
                if ch == counter_process_key:
                    service_counter(counter_instance, sq, logger)
                    break

        for accept_thread in accept_threads_list:
            accept_thread.join()

    except Exception as error:
        utilities.show_exception_info(error)
        exit(1)
Example #35
 def __init__(self, game):
     self.event_listener = game.event_listener
     self.log = get_logger("network")
     self.socket_handler = SocketHandler()
Example #36
def do_linear_search(test=False, test_dim=32):
    """
    Linear search function...

    Returns
    -------
    None.

    """
    logger = ut.get_logger()

    device = "cuda"
    model_name = "EDSR"
    config = toml.load("../config.toml")
    run = config["run"]
    scale = int(config["scale"]) if config["scale"] else 4
    # device information
    _, device_name = ut.get_device_details()
    total, _, _ = ut.get_gpu_details(
        device, "\nDevice info:", logger, print_details=False
    )
    log_message = (
        "\nDevice: "
        + device
        + "\tDevice name: "
        + device_name
        + "\tTotal memory: "
        + str(total)
    )
    logger.info(log_message)

    ut.clear_cuda(None, None)

    state = "Before loading model: "
    total, used, _ = ut.get_gpu_details(device, state, logger, print_details=True)

    model = md.load_edsr(device=device)

    state = "After loading model: "
    total, used, _ = ut.get_gpu_details(device, state, logger, print_details=True)

    # =============================================================================
    #     file = open("temp_max_dim.txt", "r")
    #     line = file.read()
    #     max_dim = int(line.split(":")[1])
    # =============================================================================
    config = toml.load("../config.toml")
    max_dim = int(config["max_dim"])
    if not test:
        detailed_result, memory_used, memory_free = result_from_dimension_range(
            device, logger, config, model, 1, max_dim
        )
    else:
        detailed_result, memory_used, memory_free = result_from_dimension_range(
            device, logger, config, model, test_dim, test_dim
        )
    if not test:
        # get mean
        # get std
        mean_time, std_time = ut.get_mean_std(detailed_result)
        mean_memory_used, std_memory_used = ut.get_mean_std(memory_used)
        mean_memory_free, std_memory_free = ut.get_mean_std(memory_free)

        # make folder for saving results
        plt_title = "Model: {} | GPU: {} | Memory: {} MB".format(
            model_name, device_name, total
        )
        date = "_".join(str(time.ctime()).split())
        date = "_".join(date.split(":"))
        foldername = date
        os.mkdir("results/" + foldername)
        # plot data
        ut.plot_data(
            foldername,
            "dimension_vs_meantime",
            mean_time,
            "Dimensionn of Patch(nxn)",
            "Mean Processing Time: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="mean time",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_stdtime",
            std_time,
            "Dimension n of Patch(nxn)",
            "Std of Processing Time: LR -> SR, Scale: {} ( {} runs )".format(
                scale, run
            ),
            mode="std time",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_meanmemoryused",
            mean_memory_used,
            "Dimension n of Patch(nxn)",
            "Mean Memory used: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="mean memory used",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_stdmemoryused",
            std_memory_used,
            "Dimension n of Patch(nxn)",
            "Std Memory Used: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="std memory used",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_meanmemoryfree",
            mean_memory_free,
            "Dimension n of Patch(nxn)",
            "Mean Memory Free: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="mean memory free",
            title=plt_title,
        )
        ut.plot_data(
            foldername,
            "dimension_vs_stdmemoryfree",
            std_memory_free,
            "Dimension n of Patch(nxn)",
            "Std Memory Free: LR -> SR, Scale: {} ( {} runs )".format(scale, run),
            mode="std memory free",
            title=plt_title,
        )
        # save data
        ut.save_csv(
            foldername,
            "total_stat",
            device,
            device_name,
            total,
            mean_time,
            std_time,
            mean_memory_used,
            std_memory_used,
            mean_memory_free,
            std_memory_free,
        )
Example #37
def run_experiment(chop_type, start_dim, end_dim, dim_gap, shave, scale,
                   stat_csv):
    """
    Driver for running three experiments

    Parameters
    ----------
    chop_type : str
        experiment type.
    start_dim : int
        image starting dimension.
    end_dim : int
        image end dimension.
    dim_gap : int
        image dimension gap.
    shave : int
        overlapping value.
    scale : int
        lr to hr scale.
    stat_csv : str
        stat related to iterative experiment.

    Returns
    -------
    None.

    """
    logger = ut.get_logger()
    if chop_type == "iterative":
        full_result_columns = [
            "Patch Dimension",
            "Maximum Batch Size",
            "Total Patches",
            "Patch list creation time",
            "Upsampling time",
            "CPU to GPU",
            "GPU to CPU",
            "Batch creation",
            "CUDA clear time",
            "Merging time",
            "Total batch processing time",
            "Total time",
            "Image Dimension",
        ]
        date = "_".join(str(time.ctime()).split())
        date = "_".join(date.split(":"))
        full_result_df = pd.DataFrame(columns=full_result_columns)
        file_name = "patch_iterative_result" + date + ".csv"
        file = open("results/" + file_name, "a")
        full_result_df.to_csv(file, index=False)

        # all_stat = []
        for d in range(int(start_dim), int(end_dim), int(dim_gap)):
            print("Image dimension: ", d)
            command = ("python3 " + "custom_upsampler.py " + " " + stat_csv +
                       " " + str(d) + " " + str(shave) + " " + str(scale))
            p = subprocess.run(command, shell=True, capture_output=True)
            if p.returncode == 0:

                # Get the results of the current batch size
                time_stats = list(
                    map(float,
                        p.stdout.decode().split("\n")[0:12]))
                time_stats.append(d)
                stat_df = pd.DataFrame([time_stats])
                stat_df.to_csv(file, header=False, index=False)
            else:
                break
        file.close()
    elif chop_type == "recursive":
        print("\nRecursive chopping...")
        # =============================================================================
        #         config = toml.load("../batch_processing.toml")
        # =============================================================================
        max_patch_dimenison = 400
        full_result_columns = [
            "Patch Dimension",
            "Upsampling time",
            "Total time",
            "Image Dimension",
        ]
        date = "_".join(str(time.ctime()).split())
        date = "_".join(date.split(":"))
        full_result_df = pd.DataFrame(columns=full_result_columns)
        file_name = "recursive_result" + date + ".csv"
        file = open("results/" + file_name, "a")
        full_result_df.to_csv(file, index=False)

        # all_stat = []
        for d in range(int(start_dim), int(end_dim), int(dim_gap)):
            print("Image dimension: ", d)
            command = ("python3 " + "forward_chop_recursive.py " + " " +
                       str(d) + " " + str(max_patch_dimenison))
            p = subprocess.run(command, shell=True, capture_output=True)
            if p.returncode == 0:

                # Get the results of the current batch size
                # print(p.stdout.decode().split("\n"))
                time_stats = [max_patch_dimenison]
                time_stats += list(
                    map(float,
                        p.stdout.decode().split("\n")[0:2]))
                time_stats.append(d)
                stat_df = pd.DataFrame([time_stats])
                stat_df.to_csv(file, header=False, index=False)
            else:
                logger.error(
                    "CUDA memory error at image dimension: {} and patch dimension: {}"
                    .format(d, max_patch_dimenison))
                break
        file.close()
    elif chop_type == "patch_iterative":
        print("\nIterative chopping...")
        config = toml.load("../batch_processing.toml")
        max_patch_dimenison = config["end_patch_dimension"]
        full_result_columns = [
            "Patch Dimension",
            "Upsampling time",
            "Cropping time",
            "Shifting time",
            "Clearing time",
            "Total time",
            "Image Dimension",
        ]
        date = "_".join(str(time.ctime()).split())
        date = "_".join(date.split(":"))
        full_result_df = pd.DataFrame(columns=full_result_columns)
        file_name = "patch_iterative_result" + date + ".csv"
        file = open("results/" + file_name, "a")
        full_result_df.to_csv(file, index=False)

        # all_stat = []
        for d in range(int(start_dim), int(end_dim), int(dim_gap)):
            print("Image dimension: ", d)
            command = ("python3 " + "forward_chop.py " + " " + str(d) + " " +
                       str(max_patch_dimenison))
            p = subprocess.run(command, shell=True, capture_output=True)
            if p.returncode == 0:

                # Get the results of the current batch size
                # print(p.stdout.decode().split("\n"))
                time_stats = [max_patch_dimension]
                time_stats += list(
                    map(float,
                        p.stdout.decode().split("\n")[0:5]))
                time_stats.append(d)
                stat_df = pd.DataFrame([time_stats])
                stat_df.to_csv(file, header=False, index=False)
            else:
                logger.error(
                    "CUDA memory error at image dimension: {} and patch dimension: {}"
                    .format(d, max_patch_dimension))
                break
        file.close()
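
# A minimal sketch of the pattern shared by the three branches above: run a
# measurement script as a subprocess, parse a fixed number of timing values
# from its stdout, and append one CSV row per image dimension. It assumes,
# as those branches do, that the script prints one float per line;
# run_and_record and its parameters are illustrative names, not part of the
# original code.

import subprocess

import pandas as pd


def run_and_record(script, args, n_stats, row_prefix, row_suffix, csv_file):
    """Run a timing script and append its stdout stats as one CSV row.

    Returns True on success, False if the subprocess exited non-zero.
    """
    command = "python3 " + script + " " + " ".join(str(a) for a in args)
    p = subprocess.run(command, shell=True, capture_output=True)
    if p.returncode != 0:
        return False
    stats = list(map(float, p.stdout.decode().split("\n")[:n_stats]))
    row = row_prefix + stats + row_suffix
    pd.DataFrame([row]).to_csv(csv_file, header=False, index=False)
    return True

# With such a helper, the patch_iterative loop body above reduces to roughly:
#     run_and_record("forward_chop.py", [d, max_patch_dimension], 5,
#                    [max_patch_dimension], [d], file)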
Ejemplo n.º 38
0
    def __init__(self, world):
        self.__log = get_logger("entity_manager")
        self.__world = world
Ejemplo n.º 39
0
    # Main loop
    while not exit_flag.is_set():
        safe_call(client, log, error_flag, exit_flag, True)
        if error_flag.is_set():
            if error_count < ERROR_COUNT_MAX:
                error_flag.clear()
                error_count += 1
            else:
                break

    if error_flag.is_set():
        show_message(log, logging.ERROR, "Error occurred %s times in client loop" % error_count)
    else:
        log.info("Finished correctly")


if __name__ == '__main__':
    try:
        open(get_base_dir() + sep + LOG_FILENAME, "w").close() # Clear log
        error_log = get_logger("Init", [stderr])
        init_error_flag = Event()
        main = client_main if flag_client_mode() else server_main
        safe_call(main, error_log, init_error_flag, None, False)
        if init_error_flag.is_set():
            show_message(error_log, logging.ERROR, "Error at main")
    except Exception as e:
        if not flag_verbose_mode():
            show_message(None, logging.ERROR, "Error at main: %s" % e)
        raise e
    exit()
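
# A hypothetical sketch of a safe_call wrapper matching the call sites above
# (callable, logger, error flag, exit flag, swallow-errors bool). The real
# safe_call comes from this project's utilities module and is not shown in
# this example, so the argument meanings here are inferred, not confirmed.
def safe_call(func, log, error_flag, exit_flag, swallow_errors):
    try:
        func()
    except Exception:
        # Record the failure and let the caller decide whether to retry.
        log.exception("Call raised an exception")
        error_flag.set()
        if not swallow_errors:
            if exit_flag is not None:
                exit_flag.set()
            raise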
Ejemplo n.º 40
0
"""Handles the detection"""
from time import time, sleep
from config import Config
from constants import COLOR_RED, COLOR_BLUE, COLOR_GREEN, STABILIZATION_TIME, UPDATE_SLEEP, COLOR_WHITE, APP_WINDOWNAME
from constants import DARK_INTERVAL, COOLDOWN_TRIGGER, DARK_THRESHOLD, AVERAGE_WEIGHT, BLUR_SIZE, SAME_FRAME_COUNT
from utilities import get_logger, flag_disable_dark, flag_daemon_only, flag_client_mode
from utilities import percent_to_number, number_to_percent, safe_call
import numpy
from sys import stdout


log = get_logger("Detection", [stdout])


class Detection(object):
    """The main class"""
    def __init__(self, cam_index, detected):
        self.detected = detected
        self.active = False
        self.left_pressed = False
        self.middle_pressed = False
        self.stop_flag = False
        self.dark_last = time()
        self.same_frame = 0
        self.last_hash = 0

        if not flag_client_mode():
            try:
                import cv2
                self.cv = cv2
            except ImportError as e:
Ejemplo n.º 41
0
    def __init__(self):
        """Initialize the DataInserterActor class."""
        super(DataInserterActor, self).__init__()
        self._log = get_logger()
Ejemplo n.º 42
0
def check_different_patches_in_forward_chop(min_dim,
                                            max_dim,
                                            shave,
                                            image_path,
                                            gap=1,
                                            run=1,
                                            device="cuda"):
    """
    Runs the iterative forward-chop experiment for a range of patch dimensions.

    Parameters
    ----------
    min_dim : int
        starting patch dimension.
    max_dim : int
        ending patch dimension.
    shave : int
        overlapping pixel amount.
    image_path : str
        image path.
    gap : int, optional
        gap between two consecutive patch dimensions. The default is 1.
    run : int, optional
        total number of runs used for averaging. The default is 1.
    device : str, optional
        GPU or CPU. The default is "cuda".

    Raises
    ------
    Exception
        If the forward_chop.py subprocess exits with a non-zero return code
        (for example, a CUDA out-of-memory error).

    Returns
    -------
    None.

    """
    logger = ut.get_logger()
    logger.info(
        "Checking different patches with dimension from {} to {} in iterative forward chop...\n"
        .format(min_dim, max_dim))
    model_name = "EDSR"
    device_name = "CPU"
    total_memory = "~"
    if device == "cuda":
        _, device_name = ut.get_device_details()
        total_memory, _, _ = ut.get_gpu_details(device,
                                                "\nDevice info:",
                                                logger=None,
                                                print_details=False)
    print_result = "0"
    full_result = []
    for d in tqdm(range(min_dim, max_dim + 1, gap)):
        s = [0, 0, 0, 0, 0]
        ut.get_gpu_details("cuda",
                           "GPU status before patch: ({}x{}):".format(d, d),
                           logger)
        for r in range(run):
            temp = [d]
            command = ("python3 " + "forward_chop.py " + image_path + " " +
                       str(d) + " " + str(shave) + " " + print_result + " " +
                       device)
            p = subprocess.run(command, shell=True, capture_output=True)
            if p.returncode == 0:
                temp += list(map(float, p.stdout.decode().split()[1:]))
                s = [s[i] + temp[i] for i in range(len(temp))]
            else:
                raise Exception(p.stderr.decode())

        s = np.array(s) / run
        full_result.append(s)

    full_result = pd.DataFrame(full_result)
    full_result.columns = [
        "Dimension",
        "EDSR Processing Time",
        "Cropping time",
        "Shifting Time",
        "CUDA Cleanign Time",
    ]

    if device == "cuda":
        plt_title = "Model: {} | GPU: {} | Memory: {} MB".format(
            model_name, device_name, total_memory)
    else:
        plt_title = "Model: {} | Device: {}".format(model_name, "CPU")

    date = "_".join(str(time.ctime()).split())
    date = "_".join(date.split(":"))

    x_data, y_data = (
        np.array(full_result.iloc[:, 0].values).flatten(),
        np.array(full_result.iloc[:, 1].values).flatten(),
    )
    x_label = "Dimension n of patch(nxn)"
    y_label = "Processing time (sec): LR -> SR"
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(plt_title)
    plt.plot(x_data, y_data)
    plt.savefig(
        "results/forward_chop_experiment/{0}.png".format("processing_time_" +
                                                         date))
    plt.show()

    x_data, y_data = (
        np.array(full_result.iloc[:, 0].values).flatten(),
        np.array(full_result.iloc[:, 2].values).flatten(),
    )
    x_label = "Dimension n of patch(nxn)"
    y_label = "Cropping time (sec): LR -> SR"
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(plt_title)
    plt.plot(x_data, y_data)
    plt.savefig(
        "results/forward_chop_experiment/{0}.png".format("cropping_time_" +
                                                         date))
    plt.show()

    x_data, y_data = (
        np.array(full_result.iloc[:, 0].values).flatten(),
        np.array(full_result.iloc[:, 3].values).flatten(),
    )
    x_label = "Dimension n of patch(nxn)"
    y_label = "Shifting time (sec): LR -> SR"
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(plt_title)
    plt.plot(x_data, y_data)
    plt.savefig(
        "results/forward_chop_experiment/{0}.png".format("shfiting_time_" +
                                                         date))
    plt.show()

    x_data, y_data = (
        np.array(full_result.iloc[:, 0].values).flatten(),
        np.array(full_result.iloc[:, 4].values).flatten(),
    )
    x_label = "Dimension n of patch(nxn)"
    y_label = "Cleaning time (sec): LR -> SR"
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(plt_title)
    plt.plot(x_data, y_data)
    plt.savefig("results/forward_chop_experiment/{0}.png".format(
        "cuda_cleaning_time_" + date))
    plt.show()

    filename = "stat_" + "EDSR_forward_processing_iterative_" + date
    file = open("results/" + "forward_chop_experiment" + "/" + filename, "a")
    file.write(device + "\n")
    file.write(device_name + "\n")
    file.write("Memory: " + str(total_memory) + "MB\n")
    full_result.to_csv(file)
    file.close()
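
# A minimal usage sketch for the experiment above, assuming forward_chop.py,
# the ut utilities module, and a results/forward_chop_experiment/ directory
# are present; the image path and dimension range are placeholder values.
if __name__ == "__main__":
    # Sweep patch sizes from 100x100 to 400x400 in steps of 50, averaging
    # each measurement over 3 runs on the GPU.
    check_different_patches_in_forward_chop(
        min_dim=100,
        max_dim=400,
        shave=10,
        image_path="test_images/input.jpg",
        gap=50,
        run=3,
        device="cuda",
    )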
Ejemplo n.º 43
0
# -*- coding: utf-8 -*-
"""

Testing the custom response.
WARNING: TO BE REDEFINED.

"""

# from ..base import ExtendedApiResource
# from restapi.rest.definition import EndpointResource
# from restapi import decorators as decorate
from utilities import get_logger

log = get_logger(__name__)

log.warning("Custom response: TO BE DEVELOPED AGAIN!")

############################################################
# # OPTION 1

# @decorate.custom_response
# def fedapp_response(
#         defined_content=None,
#         code=None,
#         errors={},
#         headers={}):

# return response, code, errors, headers

#     return ExtendedApiResource.flask_response("Hello")
from azure.servicebus.aio import ServiceBusClient, ServiceBusSender, ServiceBusReceiver
from azure.servicebus import ServiceBusMessage
from azure.servicebus.aio._base_handler_async import ServiceBusSharedKeyCredential
from azure.servicebus.exceptions import (ServiceBusError,
                                         ServiceBusAuthenticationError,
                                         ServiceBusAuthorizationError)
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import (CachedServiceBusNamespacePreparer,
                                 ServiceBusTopicPreparer,
                                 ServiceBusQueuePreparer,
                                 ServiceBusNamespaceAuthorizationRulePreparer,
                                 ServiceBusQueueAuthorizationRulePreparer,
                                 CachedServiceBusQueuePreparer)
from utilities import get_logger

_logger = get_logger(logging.DEBUG)


class ServiceBusClientAsyncTests(AzureMgmtTestCase):
    @pytest.mark.liveTest
    @pytest.mark.live_test_only
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    @CachedServiceBusQueuePreparer(name_prefix='servicebustest',
                                   dead_lettering_on_message_expiration=True)
    async def test_sb_client_bad_credentials_async(self, servicebus_namespace,
                                                   servicebus_queue, **kwargs):
        client = ServiceBusClient(
            fully_qualified_namespace=servicebus_namespace.name +
            '.servicebus.windows.net',
            credential=ServiceBusSharedKeyCredential('invalid', 'invalid'),
Ejemplo n.º 45
0
Archivo: app.py Proyecto: ckier/shipdb
""" Flask web app providing shipdb api.
"""

from api_1_0 import api as api_1_0_blueprint
import config
from flask import Flask, redirect, url_for, request, jsonify, render_template
from flask_cors import CORS
from output_manager import BaseOutputManagerActor
from utilities import configure_logger, get_logger
from conductor import start_conductor, shutdown_conductor

app = Flask(__name__)
CORS(app)
config.load(app.config)
configure_logger()
log = get_logger()

app.register_blueprint(api_1_0_blueprint, url_prefix='/api/1')


@app.route('/')
def index():
    """ Show the index page.
    """
    return render_template('index.html')


@app.route('/shutdown')
def shutdown():
    shutdown_conductor()
    shutdown_server()